/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			       struct ttm_resource *reg);
static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
/*
 * NV10-NV40 tiling helpers
 */
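/* (Re)program a single hardware tile region: drop any fence the region was
 * waiting on, tear down the old tiling state, and, if a new pitch is given,
 * program the new parameters into the framebuffer unit.
 */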
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, zeta);
	return found;
}
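/* TTM destroy callback: the buffer is going away, so release its tile
 * region, drop it from the io-reserve LRU, and free the driver bookkeeping.
 */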
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->bo.pin_count > 0);
	nouveau_bo_del_io_reserve_lru(bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/*
	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
	 * initialized, so don't attempt to release it.
	 */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);

	kfree(nvbo);
}
static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}
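/* Apply chipset-specific size/alignment constraints: pre-Tesla tiled
 * buffers need mode-dependent padding, everything newer is aligned to
 * the GPU page size chosen in nouveau_bo_alloc().
 */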
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}
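/* Allocate and initialise the driver-side state for a buffer object,
 * decoding kind/compression tags from the tile flags and picking the
 * target GPU page size, before TTM placement happens in nouveau_bo_init().
 */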
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
		 u32 tile_mode, u32 tile_flags)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
	int i, pi = -1;

	if (!*size) {
		NV_WARN(drm, "skipped size %016llx\n", *size);
		return ERR_PTR(-EINVAL);
	}

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
			continue;
		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (*size >= 1ULL << vmm->page[i].shift)
			break;
	}

	if (WARN_ON(pi < 0)) {
		kfree(nvbo);
		return ERR_PTR(-EINVAL);
	}

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, align, size);

	return nvbo;
}
int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
		struct sg_table *sg, struct dma_resv *robj)
{
	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
	size_t acc_size;
	int ret;

	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, domain, 0);
	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
			  &nvbo->placement, align >> PAGE_SHIFT, false,
			  acc_size, sg, robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	return 0;
}
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct dma_resv *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
	if (ret)
		return ret;

	*pnvbo = nvbo;
	return 0;
}
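/* Translate NOUVEAU_GEM_DOMAIN_* flags into a list of TTM placements,
 * in VRAM -> GART -> SYSTEM order of preference.
 */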
static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
{
	*n = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
		pl[*n].mem_type = TTM_PL_VRAM;
		pl[(*n)++].flags = 0;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
		pl[*n].mem_type = TTM_PL_TT;
		pl[(*n)++].flags = 0;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
		pl[*n].mem_type = TTM_PL_SYSTEM;
		pl[(*n)++].flags = 0;
	}
}
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
			 uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement, domain);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   domain | busy);

	set_placement_range(nvbo, domain);
}
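/* Pin a buffer into the requested memory type, migrating it first if the
 * requested domain or contiguity requirement isn't already satisfied, and
 * adjust the available VRAM/GART accounting.
 */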
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->bo.pin_count) {
		bool error = evict;

		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
			break;
		case TTM_PL_TT:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
			break;
		default:
			break;
		}

		if (error) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 bo->mem.mem_type, domain);
			ret = -EBUSY;
		}
		ttm_bo_pin(&nvbo->bo);
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nouveau_bo_placement_set(nvbo, domain, 0);
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;

	ttm_bo_pin(&nvbo->bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ttm_bo_unpin(&nvbo->bo);
	if (!nvbo->bo.pin_count) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return 0;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}
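/* DMA cache maintenance for non-coherent platforms: flush CPU writes to
 * the device before GPU access, and invalidate CPU caches before reading
 * data back.
 */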
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->num_pages; i++)
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}
void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->num_pages; i++)
		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					PAGE_SIZE, DMA_FROM_DEVICE);
}
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}
void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}
void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
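/* ttm_tt backend: route through AGP when an AGP bridge is in use,
 * otherwise fall back to the sgdma implementation.
 */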
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
	}
#endif

	return nouveau_sgdma_create_ttm(bo, page_flags);
}
static int
nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
		    struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
#endif
	if (!reg)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge)
		return ttm_agp_bind(ttm, reg);
#endif
	return nouveau_sgdma_bind(bdev, ttm, reg);
}
static void
nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	nouveau_sgdma_unbind(bdev, ttm);
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
					 NOUVEAU_GEM_DOMAIN_CPU);
		break;
	default:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
		break;
	}

	*pl = nvbo->placement;
}
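/* Set up temporary GPU virtual mappings of the source and destination
 * so the copy engine can see both sides of a buffer migration.
 */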
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_resource *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	return ret;
}
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
		     struct ttm_operation_ctx *ctx,
		     struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_resource
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict, false,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}
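/* Probe for the most capable copy-engine class the device exposes and
 * hook it up as the buffer-move implementation; fall back to CPU copies
 * if none of the methods initialise.
 */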
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct _method_table {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{}
	};
	const struct _method_table *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_dtor(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
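/* VRAM -> SYSTEM moves go in two hops through a temporary GART-bound
 * buffer (flipd), and SYSTEM -> VRAM moves symmetrically (flips), since
 * the copy engine cannot address unbound system pages directly.
 */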
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_TT,
		.flags = 0
	};
	struct ttm_placement placement;
	struct ttm_resource tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx);
	if (ret)
		return ret;

	ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
	if (ret)
		goto out;

	ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, ctx, &tmp_reg);
	if (ret)
		goto out;

	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out;

	nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
	ttm_resource_free(bo, &bo->mem);
	ttm_bo_assign_mem(bo, &tmp_reg);
out:
	ttm_resource_free(bo, &tmp_reg);
	return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_TT,
		.flags = 0
	};
	struct ttm_placement placement;
	struct ttm_resource tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx);
	if (ret)
		return ret;

	ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
	if (unlikely(ret != 0))
		goto out;

	ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_assign_mem(bo, &tmp_reg);
	ret = nouveau_bo_move_m2mf(bo, true, ctx, new_reg);
out:
	ttm_resource_free(bo, &tmp_reg);
	return ret;
}
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_resource *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	nouveau_bo_del_io_reserve_lru(bo);

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}

	if (new_reg) {
		if (new_reg->mm_node)
			nvbo->offset = (new_reg->start << PAGE_SHIFT);
		else
			nvbo->offset = 0;
	}
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}
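/* Main TTM move callback: handles the no-op and bind/unbind-only cases
 * inline, uses the copy engine when one was initialised, and falls back
 * to ttm_bo_move_memcpy() as a last resort.
 */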
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (new_reg->mem_type == TTM_PL_TT) {
		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
		if (ret)
			return ret;
	}

	nouveau_bo_move_ntfy(bo, evict, new_reg);
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out_ntfy;

	if (nvbo->bo.pin_count)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			goto out_ntfy;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_SYSTEM &&
	    new_reg->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_TT &&
	    new_reg->mem_type == TTM_PL_SYSTEM) {
		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->mem);
		ttm_bo_assign_mem(bo, new_reg);
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, ctx,
						    new_reg);
		else if (old_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, ctx,
						    new_reg);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, ctx,
						   new_reg);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, ctx, new_reg);

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}
out_ntfy:
	if (ret) {
		swap(*new_reg, bo->mem);
		nouveau_bo_move_ntfy(bo, false, new_reg);
		swap(*new_reg, bo->mem);
	}
	return ret;
}
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
					  filp->private_data);
}
static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}
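/* Reserve the resources needed to map a buffer through BAR1, evicting
 * io-space mappings of other buffers (in LRU order) when the BAR is full.
 */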
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nvif_mmu *mmu = &drm->client.mmu;
	const u8 type = mmu->type[drm->ttm.type_vram].type;
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
			reg->bus.caching = ttm_write_combined;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;

		/* Some BARs do not support being ioremapped WC */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
		    type & NVIF_MEM_UNCACHED)
			reg->bus.caching = ttm_uncached;
		else
			reg->bus.caching = ttm_write_combined;

		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
			goto retry;
		}
	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}
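/* Called on CPU faults: migrate buffers that cannot legally be mapped
 * where they currently sit (tiled system memory, unmappable VRAM) before
 * the fault is satisfied.
 */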
vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type != TTM_PL_SYSTEM)
			return 0;

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);

	} else {
		/* make sure bo is in mappable vram */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
		    bo->mem.start + bo->mem.num_pages < mappable)
			return 0;

		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = 0;
			nvbo->placements[i].lpfn = mappable;
		}

		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = 0;
			nvbo->busy_placements[i].lpfn = mappable;
		}

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}
static int
nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		return 0;
	}

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		return ttm_pool_populate(ttm, ctx);
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev, ctx);
	}
#endif
	return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
}
static void
nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
			  struct ttm_tt *ttm)
{
	struct ttm_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		ttm_pool_unpopulate(ttm);
		return;
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
}
static void
nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	nouveau_sgdma_destroy(bdev, ttm);
}
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (exclusive)
		dma_resv_add_excl_fence(resv, &fence->base);
	else if (fence)
		dma_resv_add_shared_fence(resv, &fence->base);
}
static void
nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	nouveau_bo_move_ntfy(bo, false, NULL);
}
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.delete_mem_notify = nouveau_bo_delete_mem_notify,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};