#ifndef __NVKM_GPUOBJ_H__
#define __NVKM_GPUOBJ_H__
#include <core/memory.h>
#include <core/mm.h>
-struct nvkm_vma;
-struct nvkm_vm;
#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
#define NVOBJ_FLAG_HEAP 0x00000004
void (*release)(struct nvkm_gpuobj *);
u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+ int (*map)(struct nvkm_gpuobj *, u64 offset, struct nvkm_vmm *,
+ struct nvkm_vma *, void *argv, u32 argc);
};
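/* Note the new hook's signature matches nvkm_memory_func.map below, which
 * is what lets the nvkm_memory_map() macro operate on gpuobjs as well.
 */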
int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
void nvkm_gpuobj_del(struct nvkm_gpuobj **);
int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
-int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
- struct nvkm_vma *);
-void nvkm_gpuobj_unmap(struct nvkm_vma *);
void nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
u32 length);
void nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
                             u32 length);
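/* A minimal sketch of the pattern that replaces the removed
 * nvkm_gpuobj_map()/nvkm_gpuobj_unmap() helpers, as the fifo/gr/secboot
 * hunks below apply it: reserve address space with nvkm_vm_get(), then back
 * it through the object's new map() hook.  The function name is illustrative
 * only; the page shift of 12 mirrors the old helper, and <subdev/mmu.h> is
 * assumed for struct nvkm_vm and NV_MEM_ACCESS_RW.
 */
static int
example_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
                   struct nvkm_vma *vma)
{
        int ret = nvkm_vm_get(vm, gpuobj->size, 12, NV_MEM_ACCESS_RW, vma);
        if (ret)
                return ret;
        /* gpuobj->func->map(), reached via the nvkm_memory_map() macro */
        ret = nvkm_memory_map(gpuobj, 0, vm, vma, NULL, 0);
        if (ret)
                nvkm_vm_put(vma);
        return ret;
}
/* teardown likewise mirrors the removed nvkm_gpuobj_unmap():
 * nvkm_vm_unmap(vma); nvkm_vm_put(vma);
 */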
#include <core/os.h>
struct nvkm_device;
struct nvkm_vma;
-struct nvkm_vm;
+struct nvkm_vmm;
struct nvkm_tags {
struct nvkm_mm_node *mn;
enum nvkm_memory_target (*target)(struct nvkm_memory *);
u64 (*addr)(struct nvkm_memory *);
u64 (*size)(struct nvkm_memory *);
- void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+ void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
void __iomem *(*acquire)(struct nvkm_memory *);
void (*release)(struct nvkm_memory *);
- void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
+ int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *,
+ struct nvkm_vma *, void *argv, u32 argc);
};
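/* map() now returns an error code, takes the target address space
 * explicitly, and accepts opaque backend-specific arguments via argv/argc;
 * every call site in this series passes NULL/0 for those.
 */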
struct nvkm_memory_ptrs {
#define nvkm_memory_addr(p) (p)->func->addr(p)
#define nvkm_memory_size(p) (p)->func->size(p)
#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
-#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+#define nvkm_memory_map(p,o,vm,va,av,ac) \
+ (p)->func->map((p),(o),(vm),(va),(av),(ac))
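/* The macro only requires (p)->func->map with a matching signature, so it
 * works equally on a struct nvkm_gpuobj now that nvkm_gpuobj_func carries a
 * .map hook, e.g.:
 *
 *      ret = nvkm_memory_map(gpuobj, 0, vm, &vma, NULL, 0);
 */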
/* accessor macros - kmap()/done() must bracket use of the other accessor
 * macros to guarantee correct behaviour across all chipsets
 */
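/* e.g., a minimal sketch, assuming a valid struct nvkm_memory *memory:
 *
 *      nvkm_kmap(memory);
 *      nvkm_wo32(memory, 0x00, 0x00000001);
 *      data = nvkm_ro32(memory, 0x00);
 *      nvkm_done(memory);
 */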
}
/* accessor functions for gpuobjs allocated directly from instmem */
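+/* heap gpuobjs wrap an instmem allocation directly, so mapping simply
+ * delegates to the backing nvkm_memory object
+ */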
+static int
+nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+ struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc)
+{
+ return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
+}
+
static u32
nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
+ .map = nvkm_gpuobj_heap_map,
};
static const struct nvkm_gpuobj_func
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_heap_rd32,
.wr32 = nvkm_gpuobj_heap_wr32,
+ .map = nvkm_gpuobj_heap_map,
};
static void *
static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap = {
.acquire = nvkm_gpuobj_heap_acquire,
+ .map = nvkm_gpuobj_heap_map,
};
/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
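+/* sub-allocated gpuobjs map through their parent, offset by the position
+ * of the sub-allocation within it (gpuobj->node->offset)
+ */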
+static int
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+ struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc)
+{
+ return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
+ vmm, vma, argv, argc);
+}
+
static u32
nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
+ .map = nvkm_gpuobj_map,
};
static const struct nvkm_gpuobj_func
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32,
.wr32 = nvkm_gpuobj_wr32,
+ .map = nvkm_gpuobj_map,
};
static void *
static const struct nvkm_gpuobj_func
nvkm_gpuobj_func = {
.acquire = nvkm_gpuobj_acquire,
+ .map = nvkm_gpuobj_map,
};
static int
return ret;
}
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
- u32 access, struct nvkm_vma *vma)
-{
- struct nvkm_memory *memory = gpuobj->memory;
- int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
- if (ret == 0)
- nvkm_memory_map(memory, vma, 0);
- return ret;
-}
-
-void
-nvkm_gpuobj_unmap(struct nvkm_vma *vma)
-{
- if (vma->node) {
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- }
-}
-
/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * anywhere else.
 */
if (ret)
return ret;
- nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
- return 0;
+ return nvkm_memory_map(fifo->user.mem, 0, bar, &fifo->user.bar, NULL, 0);
}
static void
if (ret)
return ret;
- nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
- return 0;
+ return nvkm_memory_map(fifo->user.mem, 0, bar, &fifo->user.bar, NULL, 0);
}
static void
struct nvkm_engine *engine)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ struct nvkm_vma *vma = &chan->engn[engine->subdev.index].vma;
+ if (vma->vm) {
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
+ }
nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
if (ret)
return ret;
- return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
- NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ ret = nvkm_vm_get(chan->vm, chan->engn[engn].inst->size, 12,
+ NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(chan->engn[engn].inst, 0, chan->vm,
+ &chan->engn[engn].vma, NULL, 0);
}
static void
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ struct nvkm_vma *vma = &chan->engn[engine->subdev.index].vma;
+ if (vma->vm) {
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
+ }
nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
if (ret)
return ret;
- return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
- NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ ret = nvkm_vm_get(chan->vm, chan->engn[engn].inst->size, 12,
+ NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(chan->engn[engn].inst, 0, chan->vm,
+ &chan->engn[engn].vma, NULL, 0);
}
static void
if (ret)
return ret;
- nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+ ret = nvkm_memory_map(chan->mmio, 0, fifoch->vm,
+ &chan->mmio_vma, NULL, 0);
+ if (ret)
+ return ret;
/* allocate buffers referenced by mmio list */
for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
if (ret)
return ret;
- nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
+ ret = nvkm_memory_map(chan->data[i].mem, 0, fifoch->vm,
+ &chan->data[i].vma, NULL, 0);
+ if (ret)
+ return ret;
+
data++;
}
node->vaddr[offset / 4] = data;
}
-static void
-gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static int
+gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
-
- nvkm_vm_map_at(vma, offset, &node->mem);
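+	/* the offset parameter is now relative to the memory object rather
+	 * than the vma, which nvkm_vm_map_at() cannot express; all callers
+	 * in this series map from 0 anyway
+	 */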
+ nvkm_vm_map_at(vma, 0, &node->mem);
+ return 0;
}
static void *
}
if (ret == 0)
- nvkm_memory_map(memory, &bar, 0);
+ ret = nvkm_memory_map(memory, 0, vmm, &bar, NULL, 0);
mutex_lock(&subdev->mutex);
if (ret || iobj->bar.node) {
/* We either failed, or another thread beat us. */
}
}
-static void
-nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static int
+nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
nvkm_vm_map_at(vma, offset, iobj->mem);
+ return 0;
}
static void
return ret;
/* Map the HS firmware so the HS bootloader can see it */
- ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
+ ret = nvkm_vm_get(gsb->vm, blob->size, 12, NV_MEM_ACCESS_RW, &vma);
if (ret) {
nvkm_falcon_put(falcon, subdev);
return ret;
}
+ ret = nvkm_memory_map(blob, 0, gsb->vm, &vma, NULL, 0);
+ if (ret)
+ goto end;
+
/* Reset and set the falcon up */
ret = nvkm_falcon_reset(falcon);
if (ret)
nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
/* We don't need the ACR firmware anymore */
- nvkm_gpuobj_unmap(&vma);
+ nvkm_vm_unmap(&vma);
+ nvkm_vm_put(&vma);
nvkm_falcon_put(falcon, subdev);
return ret;