}
/* Check last CCS value, or at least last value in page. */
- offset = xe_device_ccs_bytes(gt->xe, bo->size);
+ offset = xe_device_ccs_bytes(gt_to_xe(gt), bo->size);
offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
if (cpu_map[offset] != get_val) {
KUNIT_FAIL(test,
static int bb_prefetch(struct xe_gt *gt)
{
- struct xe_device *xe = gt->xe;
+ struct xe_device *xe = gt_to_xe(gt);
if (GRAPHICS_VERx100(xe) >= 1250 && !xe_gt_is_media_type(gt))
/*
u64 start, end;
/* Display may have allocated inside ggtt, so be careful with clearing here */
- xe_device_mem_access_get(ggtt->gt->xe);
+ xe_device_mem_access_get(gt_to_xe(ggtt->gt));
mutex_lock(&ggtt->lock);
drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
xe_ggtt_clear(ggtt, start, end - start);
xe_ggtt_invalidate(ggtt->gt);
mutex_unlock(&ggtt->lock);
- xe_device_mem_access_put(ggtt->gt->xe);
+ xe_device_mem_access_put(gt_to_xe(ggtt->gt));
}
int xe_ggtt_init(struct xe_gt *gt, struct xe_ggtt *ggtt)
if (err)
return err;
- xe_device_mem_access_get(ggtt->gt->xe);
+ xe_device_mem_access_get(gt_to_xe(ggtt->gt));
mutex_lock(&ggtt->lock);
err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
alignment, 0, start, end, 0);
if (!err)
xe_ggtt_map_bo(ggtt, bo);
mutex_unlock(&ggtt->lock);
- xe_device_mem_access_put(ggtt->gt->xe);
+ xe_device_mem_access_put(gt_to_xe(ggtt->gt));
return err;
}
void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node)
{
- xe_device_mem_access_get(ggtt->gt->xe);
+ xe_device_mem_access_get(gt_to_xe(ggtt->gt));
mutex_lock(&ggtt->lock);
xe_ggtt_clear(ggtt, node->start, node->size);
xe_ggtt_invalidate(ggtt->gt);
mutex_unlock(&ggtt->lock);
- xe_device_mem_access_put(ggtt->gt->xe);
+ xe_device_mem_access_put(gt_to_xe(ggtt->gt));
}
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
return gt->info.type == XE_GT_TYPE_MEDIA;
}
-#define gt_to_xe(gt__) \
- _Generic(gt__, \
- const struct xe_gt *: (const struct xe_device *)((gt__)->xe), \
- struct xe_gt *: (gt__)->xe)
-
static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
{
struct xe_device *xe = gt_to_xe(gt);
#include "xe_device_types.h"
#define xe_gt_printk(_gt, _level, _fmt, ...) \
- drm_##_level(&(_gt)->xe->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
#define xe_gt_err(_gt, _fmt, ...) \
xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__)
xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__)
#define xe_gt_WARN(_gt, _condition, _fmt, ...) \
- drm_WARN(&(_gt)->xe->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ drm_WARN(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
#define xe_gt_WARN_ONCE(_gt, _condition, _fmt, ...) \
- drm_WARN_ONCE(&(_gt)->xe->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ drm_WARN_ONCE(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
#define xe_gt_WARN_ON(_gt, _condition) \
xe_gt_WARN((_gt), _condition, "%s(%s)", "gt_WARN_ON", __stringify(_condition))
XE_BUG_ON(len > MAX_TLB_INVALIDATION_LEN);
- xe_device_mem_access_get(gt->xe);
+ xe_device_mem_access_get(xe);
ret = send_tlb_invalidation(&gt->uc.guc, fence, action, len);
- xe_device_mem_access_put(gt->xe);
+ xe_device_mem_access_put(xe);
return ret;
}
TLB_INVALIDATION_SEQNO_MAX;
if (!expected_seqno)
expected_seqno = 1;
- if (drm_WARN_ON(&gt->xe->drm, expected_seqno != msg[0])) {
- drm_err(&gt->xe->drm, "TLB expected_seqno(%d) != msg(%u)\n",
+ if (drm_WARN_ON(&gt_to_xe(gt)->drm, expected_seqno != msg[0])) {
+ drm_err(&gt_to_xe(gt)->drm, "TLB expected_seqno(%d) != msg(%u)\n",
expected_seqno, msg[0]);
}
NUM_STEERING_TYPES
};
+#define gt_to_tile(gt__) \
+ _Generic(gt__, \
+ const struct xe_gt *: (const struct xe_tile *)((gt__)->tile), \
+ struct xe_gt *: (gt__)->tile)
+
+#define gt_to_xe(gt__) \
+ _Generic(gt__, \
+ const struct xe_gt *: (const struct xe_device *)(gt_to_tile(gt__)->xe), \
+ struct xe_gt *: gt_to_tile(gt__)->xe)
+
/**
* struct xe_gt - A "Graphics Technology" unit of the GPU
*
* separate GTs within a tile.
*/
struct xe_gt {
- /** @xe: backpointer to XE device */
- struct xe_device *xe;
+ /** @tile: Backpointer to GT's tile */
+ struct xe_tile *tile;
/** @info: GT info */
struct {
return err;
/* actual size */
- if (unlikely(gt->xe->info.platform == XE_DG1)) {
- *tile_size = pci_resource_len(to_pci_dev(gt->xe->drm.dev), GEN12_LMEM_BAR);
+ if (unlikely(gt_to_xe(gt)->info.platform == XE_DG1)) {
+ *tile_size = pci_resource_len(to_pci_dev(gt_to_xe(gt)->drm.dev), GEN12_LMEM_BAR);
*tile_offset = 0;
} else {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id));
}
/* minus device usage */
- if (gt->xe->info.has_flat_ccs) {
+ if (gt_to_xe(gt)->info.has_flat_ccs) {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
offset = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
} else {
unsigned int i;
u32 mocs;
- mocs_dbg(&gt->xe->drm, "entries:%d\n", info->n_entries);
+ mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
"Unused entries index should have been defined\n");
for (i = 0;
i++) {
struct xe_reg reg = XE_REG(addr + i * 4);
- mocs_dbg(&gt->xe->drm, "%d 0x%x 0x%x\n", i, reg.addr, mocs);
+ mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x\n", i, reg.addr, mocs);
xe_mmio_write32(gt, reg, mocs);
}
}
unsigned int i;
u32 l3cc;
- mocs_dbg(&gt->xe->drm, "entries:%d\n", info->n_entries);
+ mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
for (i = 0;
i < (info->n_entries + 1) / 2 ?
(l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
get_entry_l3cc(info, 2 * i + 1))), 1 : 0;
i++) {
- mocs_dbg(&gt->xe->drm, "%d 0x%x 0x%x\n", i, LNCFCMOCS(i).addr,
+ mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x\n", i, LNCFCMOCS(i).addr,
l3cc);
xe_mmio_write32(gt, LNCFCMOCS(i), l3cc);
}
{
struct xe_mocs_info table;
- get_mocs_settings(gt->xe, &table);
+ get_mocs_settings(gt_to_xe(gt), &table);
gt->mocs.uc_index = table.uc_index;
gt->mocs.wb_index = table.wb_index;
}
/*
* LLC and eDRAM control values are not applicable to dgfx
*/
- flags = get_mocs_settings(gt->xe, &table);
- mocs_dbg(&gt->xe->drm, "flag:0x%x\n", flags);
+ flags = get_mocs_settings(gt_to_xe(gt), &table);
+ mocs_dbg(&gt_to_xe(gt)->drm, "flag:0x%x\n", flags);
if (flags & HAS_GLOBAL_MOCS)
__init_mocs_table(gt, &table, GLOBAL_MOCS(0).addr);
gt = &tile->primary_gt;
gt->info.id = id;
- gt->xe = xe;
+ gt->tile = tile;
if (id == 0) {
gt->info.type = XE_GT_TYPE_MAIN;
* TODO: Suballocate the pt bo to avoid wasting a lot of
* memory.
*/
- if (GRAPHICS_VERx100(xe_walk->gt->xe) >= 1250 && level == 1 &&
+ if (GRAPHICS_VERx100(gt_to_xe(xe_walk->gt)) >= 1250 && level == 1 &&
covers && xe_pt_scan_64K(addr, next, xe_walk)) {
walk->shifts = xe_compact_pt_shifts;
flags |= XE_PDE_64K;
static s64 detect_bar2_dgfx(struct xe_gt *gt, struct xe_ttm_stolen_mgr *mgr)
{
- struct pci_dev *pdev = to_pci_dev(gt->xe->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(gt_to_xe(gt)->drm.dev);
u64 stolen_size;
u64 tile_offset;
u64 tile_size;
u64 vram_size;
if (xe_mmio_tile_vram_size(gt, &vram_size, &tile_size, &tile_offset)) {
- drm_err(&gt->xe->drm, "Querying total vram size failed\n");
+ drm_err(&gt_to_xe(gt)->drm, "Querying total vram size failed\n");
return 0;
}
/* Use DSM base address instead for stolen memory */
mgr->stolen_base = (xe_mmio_read64(gt, DSMBASE) & BDSM_MASK) - tile_offset;
- if (drm_WARN_ON(&gt->xe->drm, tile_size < mgr->stolen_base))
+ if (drm_WARN_ON(&gt_to_xe(gt)->drm, tile_size < mgr->stolen_base))
return 0;
stolen_size = tile_size - mgr->stolen_base;