/* First part of the test: are we updating our pagetable BO with a new entry? */
xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
0xdeaddeadbeefbeef);
- expected = xe_pte_encode(pt, 0, XE_CACHE_WB, 0);
+ expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, XE_CACHE_WB, 0);
if (m->q->vm->flags & XE_VM_FLAG_64K)
expected |= XE_PTE_PS64;
if (xe_bo_is_vram(pt))
return ret;
}
- entry = xe_pde_encode(bo, bo->size - XE_PAGE_SIZE, XE_CACHE_WB);
+ entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, XE_CACHE_WB);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
/* Map the entire BO in our level 0 pt */
for (i = 0, level = 0; i < num_entries; level++) {
- entry = xe_pte_encode(bo, i * XE_PAGE_SIZE, XE_CACHE_WB, 0);
+ entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
+ XE_CACHE_WB, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
for (i = 0; i < batch->size;
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
- entry = xe_pte_encode(batch, i, XE_CACHE_WB, 0);
+ entry = vm->pt_ops->pte_encode_bo(batch, i,
+ XE_CACHE_WB, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
entry);
if (vm->flags & XE_VM_FLAG_64K && level == 1)
flags = XE_PDE_64K;
- entry = xe_pde_encode(bo, map_ofs + (level - 1) *
- XE_PAGE_SIZE, XE_CACHE_WB);
+ entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
+ XE_PAGE_SIZE, XE_CACHE_WB);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
}
/* Write PDEs that point to our BO. */
for (i = 0; i < num_entries - num_level; i++) {
- entry = xe_pde_encode(bo, i * XE_PAGE_SIZE,
- XE_CACHE_WB);
+ entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
+ XE_CACHE_WB);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
(i + 1) * 8, u64, entry);
xe_tile_assert(tile, pt_bo->size == SZ_4K);
- addr = xe_pte_encode(pt_bo, 0, XE_CACHE_WB, 0);
+ addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, XE_CACHE_WB, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
}
return container_of(pt_dir->dir.entries[index], struct xe_pt, base);
}
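The change applied throughout the hunks above is mechanical: every direct call to the global encode helpers now goes through the VM's ops table. Schematically, a sketch using the names from this patch ("offset" stands in for the per-call-site byte offset):

	/* Before: direct call to the global encoder */
	entry = xe_pte_encode(bo, offset, XE_CACHE_WB, 0);

	/* After: indirect call through the per-VM ops table */
	entry = vm->pt_ops->pte_encode_bo(bo, offset, XE_CACHE_WB, 0);

The same substitution maps xe_pde_encode() to pde_encode_bo() and __vma_pte_encode() to pte_encode_vma().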
-static u64 pde_encode_cache(enum xe_cache_level cache)
-{
- /* FIXME: I don't think the PPAT handling is correct for MTL */
-
- if (cache != XE_CACHE_NONE)
- return PPAT_CACHED_PDE;
-
- return PPAT_UNCACHED;
-}
-
-static u64 pte_encode_cache(enum xe_cache_level cache)
-{
- /* FIXME: I don't think the PPAT handling is correct for MTL */
- switch (cache) {
- case XE_CACHE_NONE:
- return PPAT_UNCACHED;
- case XE_CACHE_WT:
- return PPAT_DISPLAY_ELLC;
- default:
- return PPAT_CACHED;
- }
-}
-
-static u64 pte_encode_ps(u32 pt_level)
-{
- /* XXX: Does hw support 1 GiB pages? */
- XE_WARN_ON(pt_level > 2);
-
- if (pt_level == 1)
- return XE_PDE_PS_2M;
- else if (pt_level == 2)
- return XE_PDPE_PS_1G;
-
- return 0;
-}
-
-/**
- * xe_pde_encode() - Encode a page-table directory entry pointing to
- * another page-table.
- * @bo: The page-table bo of the page-table to point to.
- * @bo_offset: Offset in the page-table bo to point to.
- * @cache: The cache level indicating the caching of @bo.
- *
- * TODO: Rename.
- *
- * Return: An encoded page directory entry. No errors.
- */
-u64 xe_pde_encode(struct xe_bo *bo, u64 bo_offset,
- const enum xe_cache_level cache)
-{
- u64 pde;
-
- pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
- pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pde |= pde_encode_cache(cache);
-
- return pde;
-}
-
-/**
- * xe_pte_encode() - Encode a page-table entry pointing to memory.
- * @bo: The BO representing the memory to point to.
- * @bo_offset: The offset into @bo.
- * @cache: The cache level indicating the caching of @bo.
- * @pt_level: The page-table level of the page-table into which the entry
- * is to be inserted.
- *
- * Return: An encoded page-table entry. No errors.
- */
-u64 xe_pte_encode(struct xe_bo *bo, u64 bo_offset, enum xe_cache_level cache,
- u32 pt_level)
-{
- u64 pte;
-
- pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
- pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_cache(cache);
- pte |= pte_encode_ps(pt_level);
-
- if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
- pte |= XE_PPGTT_PTE_DM;
-
- return pte;
-}
-
-/* Like xe_pte_encode(), but with a vma and a partially-encoded pte */
-static u64 __vma_pte_encode(u64 pte, struct xe_vma *vma,
- enum xe_cache_level cache, u32 pt_level)
-{
- pte |= XE_PAGE_PRESENT;
-
- if (likely(!xe_vma_read_only(vma)))
- pte |= XE_PAGE_RW;
-
- pte |= pte_encode_cache(cache);
- pte |= pte_encode_ps(pt_level);
-
- if (unlikely(xe_vma_is_null(vma)))
- pte |= XE_PTE_NULL;
-
- return pte;
-}
-
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
unsigned int level)
{
if (!vm->scratch_bo[id])
return 0;
- if (level == 0) {
- u64 empty = xe_pte_encode(vm->scratch_bo[id], 0,
- XE_CACHE_WB, 0);
+ if (level > 0)
+ return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
+ 0, XE_CACHE_WB);
- return empty;
- } else {
- return xe_pde_encode(vm->scratch_pt[id][level - 1]->bo, 0,
- XE_CACHE_WB);
- }
+ return vm->pt_ops->pte_encode_bo(vm->scratch_bo[id], 0, XE_CACHE_WB, 0);
}
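Because the removed and added lines interleave here, the helper may be easier to review assembled. A sketch of __xe_pt_empty_pte() as it reads after this patch (the id local is assumed from surrounding context not shown in the hunk):

	static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
				     unsigned int level)
	{
		u16 id = tile->id;	/* assumed: declared in context above */

		if (!vm->scratch_bo[id])
			return 0;

		/* Upper levels point at the scratch page table one level down. */
		if (level > 0)
			return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
							 0, XE_CACHE_WB);

		/* The leaf level maps the scratch BO itself. */
		return vm->pt_ops->pte_encode_bo(vm->scratch_bo[id], 0, XE_CACHE_WB, 0);
	}

Flipping the test to an early return for level > 0 drops the temporary and a level of nesting without changing behavior.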
/**
struct xe_pt_stage_bind_walk *xe_walk =
container_of(walk, typeof(*xe_walk), base);
struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
+ struct xe_vm *vm = xe_walk->vm;
struct xe_pt *xe_child;
bool covers;
int ret = 0;
XE_WARN_ON(xe_walk->va_curs_start != addr);
- pte = __vma_pte_encode(is_null ? 0 :
- xe_res_dma(curs) + xe_walk->dma_offset,
- xe_walk->vma, xe_walk->cache, level);
+ pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
+ xe_res_dma(curs) + xe_walk->dma_offset,
+ xe_walk->vma, xe_walk->cache, level);
pte |= xe_walk->default_pte;
/*
xe_child->is_compact = true;
}
- pte = xe_pde_encode(xe_child->bo, 0, xe_walk->cache) | flags;
+ pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0,
+ xe_walk->cache) | flags;
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
pte);
}
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
-u64 xe_pde_encode(struct xe_bo *bo, u64 bo_offset,
- const enum xe_cache_level level);
-
-u64 xe_pte_encode(struct xe_bo *bo, u64 offset, enum xe_cache_level cache,
- u32 pt_level);
-
#endif
#ifndef _XE_PT_TYPES_H_
#define _XE_PT_TYPES_H_
+#include <linux/types.h>
+
#include "xe_pt_walk.h"
+struct xe_bo;
+struct xe_vma;
+
enum xe_cache_level {
XE_CACHE_NONE,
XE_CACHE_WT,
#endif
};
+/** struct xe_pt_ops - Per-platform page-table entry encoding hooks */
+struct xe_pt_ops {
+	/** @pte_encode_bo: encode a leaf PTE pointing at memory backed by @bo */
+	u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset,
+			     enum xe_cache_level cache, u32 pt_level);
+	/** @pte_encode_vma: finish a partially-encoded PTE for @vma */
+	u64 (*pte_encode_vma)(u64 pte, struct xe_vma *vma,
+			      enum xe_cache_level cache, u32 pt_level);
+	/** @pde_encode_bo: encode a PDE pointing at the page table held in @bo */
+	u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
+			     const enum xe_cache_level cache);
+};
+
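Gathering the encoders behind struct xe_pt_ops means a platform with different PTE/PPAT semantics (the MTL FIXMEs in the moved helpers are the obvious candidate) can supply its own table without touching any call site. A hypothetical sketch; the xe2_* names are illustrative only, not part of this patch:

	/* Hypothetical: an alternate table for a platform with its own encoding */
	static const struct xe_pt_ops xe2_pt_ops = {
		.pte_encode_bo  = xe2_pte_encode_bo,	/* hypothetical */
		.pte_encode_vma = xe2_pte_encode_vma,	/* hypothetical */
		.pde_encode_bo  = xe2_pde_encode_bo,	/* hypothetical */
	};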
struct xe_pt_entry {
struct xe_pt *pt;
u64 pte;
.vm_free = xe_vm_free,
};
+static u64 pde_encode_cache(enum xe_cache_level cache)
+{
+ /* FIXME: I don't think the PPAT handling is correct for MTL */
+
+ if (cache != XE_CACHE_NONE)
+ return PPAT_CACHED_PDE;
+
+ return PPAT_UNCACHED;
+}
+
+static u64 pte_encode_cache(enum xe_cache_level cache)
+{
+ /* FIXME: I don't think the PPAT handling is correct for MTL */
+ switch (cache) {
+ case XE_CACHE_NONE:
+ return PPAT_UNCACHED;
+ case XE_CACHE_WT:
+ return PPAT_DISPLAY_ELLC;
+ default:
+ return PPAT_CACHED;
+ }
+}
+
+static u64 pte_encode_ps(u32 pt_level)
+{
+ /* XXX: Does hw support 1 GiB pages? */
+ XE_WARN_ON(pt_level > 2);
+
+ if (pt_level == 1)
+ return XE_PDE_PS_2M;
+ else if (pt_level == 2)
+ return XE_PDPE_PS_1G;
+
+ return 0;
+}
+
+static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
+ const enum xe_cache_level cache)
+{
+ u64 pde;
+
+ pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
+ pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
+ pde |= pde_encode_cache(cache);
+
+ return pde;
+}
+
+static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
+ enum xe_cache_level cache, u32 pt_level)
+{
+ u64 pte;
+
+ pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
+ pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
+ pte |= pte_encode_cache(cache);
+ pte |= pte_encode_ps(pt_level);
+
+ if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
+ pte |= XE_PPGTT_PTE_DM;
+
+ return pte;
+}
+
+static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
+ enum xe_cache_level cache, u32 pt_level)
+{
+ pte |= XE_PAGE_PRESENT;
+
+ if (likely(!xe_vma_read_only(vma)))
+ pte |= XE_PAGE_RW;
+
+ pte |= pte_encode_cache(cache);
+ pte |= pte_encode_ps(pt_level);
+
+ if (unlikely(xe_vma_is_null(vma)))
+ pte |= XE_PTE_NULL;
+
+ return pte;
+}
+
+static const struct xe_pt_ops xelp_pt_ops = {
+ .pte_encode_bo = xelp_pte_encode_bo,
+ .pte_encode_vma = xelp_pte_encode_vma,
+ .pde_encode_bo = xelp_pde_encode_bo,
+};
+
static void xe_vma_op_work_func(struct work_struct *w);
static void vm_destroy_work_func(struct work_struct *w);
INIT_LIST_HEAD(&vm->extobj.list);
+ vm->pt_ops = &xelp_pt_ops;
+
if (!(flags & XE_VM_FLAG_MIGRATION))
xe_device_mem_access_get(xe);
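With a single implementation the assignment is unconditional, but the indirection leaves room for per-platform selection at VM creation. A sketch only (GRAPHICS_VER() is the driver's existing graphics-version helper; xe2_pt_ops is the hypothetical table from the earlier sketch):

	if (GRAPHICS_VER(xe) >= 20)
		vm->pt_ops = &xe2_pt_ops;	/* hypothetical future table */
	else
		vm->pt_ops = &xelp_pt_ops;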
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
- return xe_pde_encode(vm->pt_root[tile->id]->bo, 0,
- XE_CACHE_WB);
+ return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
+ XE_CACHE_WB);
}
static struct dma_fence *
bool munmap_rebind_inflight;
} async_ops;
+	/** @pt_ops: Page table operations */
+	const struct xe_pt_ops *pt_ops;
+
/** @userptr: user pointer state */
struct {
/**