/* First part of the test: are we updating our pagetable bo with a new entry? */
xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
0xdeaddeadbeefbeef);
- expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, XE_CACHE_WB, 0);
+ expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0);
if (m->q->vm->flags & XE_VM_FLAG_64K)
expected |= XE_PTE_PS64;
if (xe_bo_is_vram(pt))
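The before/after pair above is the crux of the whole conversion: callers resolve the abstract enum xe_cache_level to a hardware PAT index through the per-device xe->pat.idx[] table before calling the encode hooks, which from now on traffic only in raw u16 indices. A minimal sketch of how such a table gets populated, assuming the real per-platform tables live elsewhere (the index values below are illustrative, not the actual platform programming):

/* Illustrative only: actual PAT index values are platform-specific. */
static void example_pat_idx_setup(struct xe_device *xe)
{
	xe->pat.idx[XE_CACHE_NONE] = 3;	/* uncached */
	xe->pat.idx[XE_CACHE_WT]   = 2;	/* write-through */
	xe->pat.idx[XE_CACHE_WB]   = 0;	/* write-back */
}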
#define GUC_GGTT_TOP 0xFEE00000
static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- enum xe_cache_level cache)
+ u16 pat_index)
{
u64 pte;
}
static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- enum xe_cache_level cache)
+ u16 pat_index)
{
struct xe_device *xe = xe_bo_device(bo);
- u32 pat_index = xe->pat.idx[cache];
u64 pte;
- pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, cache);
+ pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index);
xe_assert(xe, pat_index <= 3);
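XeLPG GGTT entries carry only two PAT bits, which is what the pat_index <= 3 assert enforces. The elided remainder of the body folds the index in bit by bit; a hedged sketch, assuming the driver's XELPG_GGTT_PTE_PAT0/1 bit definitions:

	if (pat_index & BIT(0))
		pte |= XELPG_GGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XELPG_GGTT_PTE_PAT1;

	return pte;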
static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
{
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
u64 end = start + size - 1;
u64 scratch_pte;
if (ggtt->scratch)
scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
- XE_CACHE_WB);
+ pat_index);
else
scratch_pte = 0;
void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
{
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
u64 addr, scratch_pte;
- scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0, XE_CACHE_WB);
+ scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0, pat_index);
printk("%sGlobal GTT:", prefix);
for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) {
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
u64 start = bo->ggtt_node.start;
u64 offset, pte;
for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
- pte = ggtt->pt_ops->pte_encode_bo(bo, offset, XE_CACHE_WB);
+ pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
xe_ggtt_set_pte(ggtt, start + offset, pte);
}
struct xe_gt;
struct xe_ggtt_pt_ops {
- u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset,
- enum xe_cache_level cache);
+ u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
};
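With the hook now taking a raw u16 pat_index, GGTT encoding stays per-platform behind this vtable. A sketch of how the tables might be instantiated and selected at init time, using the two encoders above (the file-local table names and the version check are assumptions):

static const struct xe_ggtt_pt_ops xelp_pt_ops = {
	.pte_encode_bo = xelp_ggtt_pte_encode_bo,
};

static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
	.pte_encode_bo = xelpg_ggtt_pte_encode_bo,
};

	/* At init, pick the ops table matching the graphics IP version. */
	ggtt->pt_ops = GRAPHICS_VERx100(xe) >= 1270 ?
		&xelpg_pt_ops : &xelp_pt_ops;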
struct xe_ggtt {
struct xe_vm *vm)
{
struct xe_device *xe = tile_to_xe(tile);
+ u16 pat_index = xe->pat.idx[XE_CACHE_WB];
u8 id = tile->id;
u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
u32 map_ofs, level, i;
return ret;
}
- entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, XE_CACHE_WB);
+ entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
/* Map the entire BO in our level 0 pt */
for (i = 0, level = 0; i < num_entries; level++) {
entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
- XE_CACHE_WB, 0);
+ pat_index, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
entry = vm->pt_ops->pte_encode_bo(batch, i,
- XE_CACHE_WB, 0);
+ pat_index, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
entry);
flags = XE_PDE_64K;
entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
- XE_PAGE_SIZE, XE_CACHE_WB);
+ XE_PAGE_SIZE, pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
}
/* Write PDEs that point to our BO. */
for (i = 0; i < num_entries - num_level; i++) {
entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
- XE_CACHE_WB);
+ pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
(i + 1) * 8, u64, entry);
level = 2;
ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
- flags = vm->pt_ops->pte_encode_addr(xe, 0, XE_CACHE_WB, level,
+ flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
true, 0);
/*
struct xe_res_cursor *cur,
u32 size, struct xe_bo *bo)
{
+ u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
u32 ptes;
u64 ofs = at_pt * XE_PAGE_SIZE;
u64 cur_ofs;
}
addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
- addr, XE_CACHE_WB,
+ addr, pat_index,
0, devmem, flags);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
bool first_munmap_rebind = vma &&
vma->gpuva.flags & XE_VMA_FIRST_REBIND;
struct xe_exec_queue *q_override = !q ? m->q : q;
+ u16 pat_index = xe->pat.idx[XE_CACHE_WB];
/* Use the CPU if there are no in syncs and the engine is idle */
if (no_in_syncs(syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
xe_tile_assert(tile, pt_bo->size == SZ_4K);
- addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, XE_CACHE_WB, 0);
+ addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
}
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
unsigned int level)
{
+ u16 pat_index = tile_to_xe(tile)->pat.idx[XE_CACHE_WB];
u8 id = tile->id;
if (!vm->scratch_bo[id])
	return 0;

if (level > 0)
return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
- 0, XE_CACHE_WB);
+ 0, pat_index);
- return vm->pt_ops->pte_encode_bo(vm->scratch_bo[id], 0, XE_CACHE_WB, 0);
+ return vm->pt_ops->pte_encode_bo(vm->scratch_bo[id], 0, pat_index, 0);
}
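__xe_pt_empty_pte() produces the "empty" entry for a given level: a PDE pointing at the next scratch page table, or the scratch-page PTE at the leaf, both now encoded with the WB PAT index. A hypothetical helper showing how a freshly allocated page table would be seeded with it (example_fill_empty is not driver code; XE_PDES and xe_pt_write() are the driver's own):

static void example_fill_empty(struct xe_tile *tile, struct xe_vm *vm,
			       struct xe_pt *pt)
{
	u64 empty = __xe_pt_empty_pte(tile, vm, pt->level);
	int i;

	/* Point every slot at scratch so unmapped addresses read back 0. */
	for (i = 0; i < XE_PDES; i++)
		xe_pt_write(tile_to_xe(tile), &pt->bo->vmap, i, empty);
}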
/**
{
struct xe_pt_stage_bind_walk *xe_walk =
container_of(walk, typeof(*xe_walk), base);
+ u16 pat_index = tile_to_xe(xe_walk->tile)->pat.idx[xe_walk->cache];
struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
struct xe_vm *vm = xe_walk->vm;
struct xe_pt *xe_child;
pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
xe_res_dma(curs) + xe_walk->dma_offset,
- xe_walk->vma, xe_walk->cache, level);
+ xe_walk->vma, pat_index, level);
pte |= xe_walk->default_pte;
/*
xe_child->is_compact = true;
}
- pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0,
- xe_walk->cache) | flags;
+ pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
pte);
}
struct xe_pt_ops {
u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset,
- enum xe_cache_level cache, u32 pt_level);
+ u16 pat_index, u32 pt_level);
u64 (*pte_encode_vma)(u64 pte, struct xe_vma *vma,
- enum xe_cache_level cache, u32 pt_level);
+ u16 pat_index, u32 pt_level);
u64 (*pte_encode_addr)(struct xe_device *xe, u64 addr,
- enum xe_cache_level cache,
+ u16 pat_index,
u32 pt_level, bool devmem, u64 flags);
u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
- const enum xe_cache_level cache);
+ u16 pat_index);
};
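For reference, the XeLP implementations defined further down are wired into a single const ops table, e.g. as assigned to vm->pt_ops at VM creation:

static const struct xe_pt_ops xelp_pt_ops = {
	.pte_encode_bo = xelp_pte_encode_bo,
	.pte_encode_vma = xelp_pte_encode_vma,
	.pte_encode_addr = xelp_pte_encode_addr,
	.pde_encode_bo = xelp_pde_encode_bo,
};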
struct xe_pt_entry {
.vm_free = xe_vm_free,
};
-static u64 pde_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
+static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
{
- u32 pat_index = xe->pat.idx[cache];
u64 pte = 0;
if (pat_index & BIT(0))
return pte;
}
-static u64 pte_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
+static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index)
{
- u32 pat_index = xe->pat.idx[cache];
u64 pte = 0;
if (pat_index & BIT(0))
}
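Both helper bodies are truncated above; each tests the low bits of pat_index and sets the matching PAT bit in the entry. A hedged reconstruction of the PTE variant, assuming the driver's XE_PPGTT_PTE_PATn bit definitions (newer platforms add higher PAT bits on top of this pattern):

	if (pat_index & BIT(0))
		pte |= XE_PPGTT_PTE_PAT0;

	if (pat_index & BIT(1))
		pte |= XE_PPGTT_PTE_PAT1;

	if (pat_index & BIT(2))
		pte |= XE_PPGTT_PTE_PAT2;

	return pte;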
static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
- const enum xe_cache_level cache)
+ const u16 pat_index)
{
struct xe_device *xe = xe_bo_device(bo);
u64 pde;
pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pde |= pde_encode_cache(xe, cache);
+ pde |= pde_encode_pat_index(xe, pat_index);
return pde;
}
static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- enum xe_cache_level cache, u32 pt_level)
+ u16 pat_index, u32 pt_level)
{
struct xe_device *xe = xe_bo_device(bo);
u64 pte;
pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_cache(xe, cache);
+ pte |= pte_encode_pat_index(xe, pat_index);
pte |= pte_encode_ps(pt_level);
if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
}
static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
- enum xe_cache_level cache, u32 pt_level)
+ u16 pat_index, u32 pt_level)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
if (likely(!xe_vma_read_only(vma)))
pte |= XE_PAGE_RW;
- pte |= pte_encode_cache(xe, cache);
+ pte |= pte_encode_pat_index(xe, pat_index);
pte |= pte_encode_ps(pt_level);
if (unlikely(xe_vma_is_null(vma)))
}
static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
- enum xe_cache_level cache,
+ u16 pat_index,
u32 pt_level, bool devmem, u64 flags)
{
u64 pte;
pte = addr;
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_cache(xe, cache);
+ pte |= pte_encode_pat_index(xe, pat_index);
pte |= pte_encode_ps(pt_level);
if (devmem)
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
- XE_CACHE_WB);
+ tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
}
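The descriptor above is the value programmed into the context image so the hardware can locate the VM's root page table. A sketch of the consumer side, assuming the LRC register helpers (xe_lrc_write_ctx_reg and the CTX_PDP0_UDW/LDW offsets):

	u64 desc = xe_vm_pdp4_descriptor(vm, tile);

	xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
	xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));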
static struct dma_fence *