/*
 * Copyright 2007 Dave Airlied
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
/* NV10-NV40 tiling helpers */

nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	pfb->tile.fini(pfb, i, tile);
	pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);
	    (!tile->fence || nouveau_fence_done(tile->fence)))
	spin_unlock(&drm->tile.lock);

nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
	struct nouveau_drm *drm = nouveau_drm(dev);

	spin_lock(&drm->tile.lock);
	tile->fence = (struct nouveau_fence *)fence_get(fence);
	spin_unlock(&drm->tile.lock);
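
/*
 * Pick a tile region for a new tiled allocation: every region the
 * framebuffer exposes is tried in turn, the first idle slot (no pending
 * fence) is taken when a pitch is requested, regions still carrying a
 * stale pitch are cleared along the way, and the chosen region is finally
 * programmed with the new addr/size/pitch/flags.
 */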
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);

		nv10_bo_put_tile_region(dev, tile, NULL);

	nv10_bo_update_tile_region(dev, found, addr, size,

nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (device->info.chipset >= 0x30) {
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (device->info.chipset >= 0x20) {
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (device->info.chipset >= 0x10) {
				*size = roundup(*size, 32 * nvbo->tile_mode);
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);

	*size = roundup(*size, PAGE_SIZE);
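
/*
 * Allocate and initialise a TTM-backed buffer object.  A typical caller
 * pairs this with the pin/map helpers below; illustrative sketch only,
 * with error handling abbreviated:
 *
 *	struct nouveau_bo *nvbo;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM, 0, 0,
 *			     NULL, NULL, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 */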
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	int type = ttm_bo_type_device;

	lpg_shift = drm->client.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);

		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	if (!nv_device_is_cpu_coherent(nvkm_device(&drm->device)))
		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
		/* ttm will call nouveau_bo_del_ttm if it fails.. */

set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;

set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			fpfn = vram_pages / 2;
			lpfn = vram_pages / 2;
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;

nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
			  TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,

	set_placement_range(nvbo, type);
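
/*
 * Pin a buffer into the requested memory type.  Pinning is refcounted:
 * pinning an already-resident buffer only bumps pin_refcnt, while the
 * first pin revalidates the buffer with TTM_PL_FLAG_NO_EVICT (added by
 * nouveau_bo_placement_set() above) and adjusts the VRAM/GART usage
 * counters.  nouveau_bo_unpin() reverses both steps once the last
 * reference is dropped.
 */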
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);

	if (nvbo->pin_refcnt)

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);

	switch (bo->mem.mem_type) {
		drm->gem.vram_available -= bo->mem.size;
		drm->gem.gart_available -= bo->mem.size;

	ttm_bo_unreserve(bo);

nouveau_bo_unpin(struct nouveau_bo *nvbo)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);

	switch (bo->mem.mem_type) {
		drm->gem.vram_available += bo->mem.size;
		drm->gem.gart_available += bo->mem.size;

	ttm_bo_unreserve(bo);

nouveau_bo_map(struct nouveau_bo *nvbo)
	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);

	/*
	 * TTM buffers allocated using the DMA API already have a mapping, let's
	 * use it instead.
	 */
	if (!nvbo->force_coherent)
		ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,

	ttm_bo_unreserve(&nvbo->bo);

nouveau_bo_unmap(struct nouveau_bo *nvbo)
	/*
	 * TTM buffers allocated using the DMA API already had a coherent
	 * mapping which we used, no need to unmap.
	 */
	if (!nvbo->force_coherent)
		ttm_bo_kunmap(&nvbo->kmap);

nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nvkm_device(&drm->device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(nv_device_base(device),
			ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);

nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nvkm_device(&drm->device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(nv_device_base(device),
			ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
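
/*
 * For non-coherent buffers the backing pages have to be synced around CPU
 * access.  An illustrative sequence (assuming the buffer is already
 * mapped):
 *
 *	nouveau_bo_sync_for_cpu(nvbo);
 *	... CPU reads/writes through the kmap ...
 *	nouveau_bo_sync_for_device(nvbo);
 *
 * nouveau_bo_validate() below calls nouveau_bo_sync_for_device() itself
 * after a successful placement change.
 */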
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);

	nouveau_bo_sync_for_device(nvbo);

_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
	struct ttm_dma_tt *dma_tt;

		/* kmap'd address, return the corresponding offset */

		/* DMA-API mapping, lookup the right address */
		dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
		m = dma_tt->cpu_address[index / PAGE_SIZE];
		m += index % PAGE_SIZE;

#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
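
/*
 * Small typed accessors for mapped buffers.  The index is in units of the
 * access size, so for a byte offset (illustrative only):
 *
 *	nouveau_bo_wr32(nvbo, offset / 4, value);
 *	value = nouveau_bo_rd32(nvbo, offset / 4);
 *
 * io-mapped objects go through ioread/iowrite, while force-coherent
 * objects have their address looked up through the DMA-API mapping.
 */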
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);
		return ioread16_native((void __force __iomem *)mem);

nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);
		iowrite16_native(val, (void __force __iomem *)mem);

nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);
		return ioread32_native((void __force __iomem *)mem);

nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);
		iowrite32_native(val, (void __force __iomem *)mem);

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);

nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
	/* We'll do this from user space. */
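
/*
 * Describe the memory domains to TTM: TTM_PL_SYSTEM is plain cached
 * system memory, TTM_PL_VRAM is a fixed, mappable aperture (write-combined
 * unless the BAR cannot be ioremapped WC), and TTM_PL_TT is backed by the
 * chipset GART managers, AGP, or the generic TTM range manager.
 */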
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;

		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvkm_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
			man->func = &ttm_bo_manager_func;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
			man->default_caching = TTM_PL_FLAG_WC;

			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;

nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);

	*pl = nvbo->placement;
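
/*
 * The hardware copy paths below all follow the same pattern: reserve ring
 * space with RING_SPACE(), emit a method header with BEGIN_NV04()/
 * BEGIN_NVC0() and its arguments with OUT_RING(), looping in chunks where
 * the copy class limits the per-submission line count.  One implementation
 * exists per copy-capable engine/class generation.
 */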
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = RING_SPACE(chan, 2);
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle & 0x0000ffff);

nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);

nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = RING_SPACE(chan, 2);
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);

nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);

nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;

	page_count = new_mem->num_pages;
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);

nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);

nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);

nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = RING_SPACE(chan, 6);
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING (chan, chan->drm->ntfy.handle);
		OUT_RING (chan, chan->vram.handle);
		OUT_RING (chan, chan->vram.handle);

nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;

		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));

		amount = min(length, (u64)(4 * 1024 * 1024));
		height = amount / stride;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);

		src_offset += amount;
		dst_offset += amount;

nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
	int ret = RING_SPACE(chan, 4);
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING (chan, chan->drm->ntfy.handle);
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
	if (mem->mem_type == TTM_PL_TT)
	return chan->vram.handle;

nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;

	ret = RING_SPACE(chan, 3);

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);

nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
	struct nouveau_mem *old_node = bo->mem.mm_node;
	struct nouveau_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;

	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[0]);

	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
		nouveau_vm_put(&old_node->vma[0]);

	nouveau_vm_map(&old_node->vma[0], old_node);
	nouveau_vm_map(&old_node->vma[1], new_node);
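
/*
 * Before a Tesla+ hardware copy, nouveau_bo_move_prep() above gives both
 * the current backing store (vma[0]) and the new one (vma[1]) temporary
 * GPU virtual mappings so the copy engine can address them; the mappings
 * hang off the old nouveau_mem node and are torn down once TTM destroys
 * the ttm_mem_reg.
 */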
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
	struct nouveau_fence *fence;

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
			ret = nouveau_fence_new(chan, false, &fence);
				ret = ttm_bo_move_accel_cleanup(bo,
				nouveau_fence_unref(&fence);

	mutex_unlock(&cli->mutex);

nouveau_bo_move_init(struct nouveau_drm *drm)
	static const struct {
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
		{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{ "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{ "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{ "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{ "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";

		struct nouveau_channel *chan;

		chan = drm->channel;

		ret = nvif_object_init(chan->object, NULL,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
			ret = mthd->init(chan, drm->ttm.copy.handle);
				nvif_object_fini(&drm->ttm.copy);

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;

	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
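
/*
 * VRAM <-> SYSTEM transfers cannot be done directly by the copy engines,
 * so they are bounced through a GART (TT) placement: _flipd copies into a
 * temporary GART-bound region and lets TTM finish the move to SYSTEM,
 * while _flips binds the temporary GART region first and then does the
 * hardware copy when a buffer leaves SYSTEM.
 */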
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
	struct ttm_place placement_memtype = {
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
	ttm_bo_mem_put(bo, &tmp_mem);

nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
	struct ttm_place placement_memtype = {
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);

	ttm_bo_mem_put(bo, &tmp_mem);

nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
			nouveau_vm_map(vma, new_mem->mm_node);
			nouveau_vm_unmap(vma);

nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	if (new_mem->mem_type != TTM_PL_VRAM)

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,

nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
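
/*
 * Top-level TTM move hook.  A move out of an unpopulated SYSTEM buffer is
 * trivial, anything else prefers the hardware copy (possibly bounced
 * through GART, see above) and only falls back to ttm_bo_move_memcpy()
 * when no copy channel is available or the hardware path failed.  Pre-Tesla
 * chips additionally update the tile region bound to the buffer.
 */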
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);

	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		new_mem->mm_node = NULL;

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);

nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))

	switch (mem->mem_type) {
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
		/* fallthrough, tiled memory */
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nouveau_bar *bar = nvkm_bar(&drm->device);

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,

			mem->bus.offset = node->bar_vma.offset;

nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nvkm_bar(&drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)

	bar->unmap(bar, &node->bar_vma);

nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
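
/*
 * Populate backing pages for a ttm_tt.  Imported (SG "slave") objects only
 * need their page/DMA address arrays filled in; uncached buffers on
 * non-coherent devices go through ttm_dma_populate(); AGP and swiotlb have
 * their own paths; everything else takes pages from the TTM pool and maps
 * them with dma_map_page(), unwinding on a mapping error.
 */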
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	pdev = nv_device_base(device);

	/*
	 * Objects matching this condition have been marked as force_coherent,
	 * so use the DMA API for them.
	 */
	if (!nv_device_is_cpu_coherent(device) &&
	    ttm->caching_state == tt_uncached)
		return ttm_dma_populate(ttm_dma, dev->dev);

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);

	r = ttm_pool_populate(ttm);

	for (i = 0; i < ttm->num_pages; i++) {
		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,

		if (dma_mapping_error(pdev, addr)) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			ttm_pool_unpopulate(ttm);

		ttm_dma->dma_address[i] = addr;

nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	pdev = nv_device_base(device);

	/*
	 * Objects matching this condition have been marked as force_coherent,
	 * so use the DMA API for them.
	 */
	if (!nv_device_is_cpu_coherent(device) &&
	    ttm->caching_state == tt_uncached)
		ttm_dma_unpopulate(ttm_dma, dev->dev);

	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,

	ttm_pool_unpopulate(ttm);

nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
	struct reservation_object *resv = nvbo->bo.resv;

		reservation_object_add_excl_fence(resv, &fence->base);
		reservation_object_add_shared_fence(resv, &fence->base);

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
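
/*
 * Per-VM mapping helpers.  A client that wants a buffer mapped into its
 * address space looks the mapping up first and only creates one if none
 * exists, for example (illustrative only, allocation of the vma struct is
 * the caller's responsibility):
 *
 *	vma = nouveau_bo_vma_find(nvbo, vm);
 *	if (!vma) {
 *		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *		if (vma)
 *			ret = nouveau_bo_vma_add(nvbo, vm, vma);
 *	}
 */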
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {

nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);

nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);