static bool xe_driver_flr_disabled(struct xe_device *xe)
{
- return xe_mmio_read32(xe_root_mmio_gt(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
+ return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
}
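
For orientation, the change repeated throughout this patch is the same: register accessors that used to be handed the root GT now take a struct xe_mmio, obtained per tile. A minimal compilable sketch of the assumed shape of the new accessor (the stub types are stand-ins, not the real xe definitions):

    #include <stdint.h>

    /* Stand-in types so the sketch compiles on its own; the real xe
     * structures carry far more state than shown here. */
    struct xe_mmio { volatile uint32_t *regs; };
    struct xe_tile { struct xe_mmio mmio; };
    struct xe_device { struct xe_tile tiles[1]; };

    /* Assumed shape: MMIO is a per-tile resource, so callers no longer
     * detour through the root GT just to reach the register block. */
    static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
    {
            return &xe->tiles[0].mmio; /* assumption: root tile is index 0 */
    }
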
/*
static void __xe_driver_flr(struct xe_device *xe)
{
const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
int ret;
drm_dbg(&xe->drm, "Triggering Driver-FLR\n");
* is still pending (unless the HW is totally dead), but better to be
* safe in case something unexpected happens
*/
- ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
return;
}
- xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+ xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
/* Trigger the actual Driver-FLR */
- xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);
+ xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);
/* Wait for hardware teardown to complete */
- ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
return;
}
/* Wait for hardware/firmware re-init to complete */
- ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
+ ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
}
/* Clear sticky completion status */
- xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+ xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
}
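
All three waits in this function rely on the same contract from xe_mmio_wait32(): poll the register until (value & mask) == val, or give up once the timeout in microseconds expires. A userspace sketch of that core loop, with read_reg32() as a hypothetical stand-in for the real MMIO read:

    #include <stdint.h>
    #include <errno.h>
    #include <time.h>

    extern uint32_t read_reg32(uint32_t reg); /* hypothetical MMIO read */

    /* Poll until (reg & mask) == val or timeout_us expires; returns 0 on
     * success, -ETIMEDOUT otherwise. The real helper can also hand back
     * the last observed value and offers an atomic (non-sleeping) mode;
     * both are omitted here. */
    static int wait32_sketch(uint32_t reg, uint32_t mask, uint32_t val,
                             unsigned int timeout_us)
    {
            const struct timespec delay = { .tv_nsec = 10 * 1000 }; /* 10us */
            unsigned int waited_us = 0;

            for (;;) {
                    if ((read_reg32(reg) & mask) == val)
                            return 0;
                    if (waited_us >= timeout_us)
                            return -ETIMEDOUT;
                    nanosleep(&delay, NULL);
                    waited_us += 10;
            }
    }
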
static void xe_driver_flr(struct xe_device *xe)
return err;
}
-static bool verify_lmem_ready(struct xe_gt *gt)
+static bool verify_lmem_ready(struct xe_device *xe)
{
- u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;
+ u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT;
return !!val;
}
static int wait_for_lmem_ready(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
unsigned long timeout, start;
if (!IS_DGFX(xe))
if (IS_SRIOV_VF(xe))
return 0;
- if (verify_lmem_ready(gt))
+ if (verify_lmem_ready(xe))
return 0;
drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
msleep(20);
- } while (!verify_lmem_ready(gt));
+ } while (!verify_lmem_ready(xe));
drm_dbg(&xe->drm, "lmem ready after %ums",
jiffies_to_msecs(jiffies - start));
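
Putting this hunk in context: wait_for_lmem_ready() checks once up front, then sleeps 20ms between re-checks and logs how long initialization took. The deadline policy below is an assumption; the real one lives in the elided context of this hunk.

    #include <stdbool.h>
    #include <stdio.h>
    #include <errno.h>
    #include <time.h>

    extern bool lmem_ready(void); /* hypothetical: GU_CNTL & LMEM_INIT */

    static int wait_lmem_sketch(void)
    {
            const struct timespec delay = { .tv_nsec = 20L * 1000 * 1000 };
            unsigned int waited_ms = 0;

            if (lmem_ready())
                    return 0;
            do {
                    if (waited_ms >= 60000) /* assumed 60s cap */
                            return -ETIMEDOUT;
                    nanosleep(&delay, NULL);
                    waited_ms += 20;
            } while (!lmem_ready());
            printf("lmem ready after %ums\n", waited_ms);
            return 0;
    }
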
*/
void xe_device_wmb(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
-
wmb();
if (IS_DGFX(xe))
- xe_mmio_write32(gt, VF_CAP_REG, 0);
+ xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}
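
The hunk above is xe_device_wmb(), the device-level write barrier: a plain wmb() only orders stores on the CPU side, so on discrete parts it is followed by a dummy MMIO write (VF_CAP_REG here; which particular register is written is immaterial) to push prior writes out to the device. A compilable illustration with hypothetical helper names:

    #include <stdint.h>
    #include <stdbool.h>

    extern void write_reg32(uint32_t reg, uint32_t val); /* hypothetical */
    extern const uint32_t DUMMY_REG; /* stand-in for VF_CAP_REG */

    /* Fence CPU stores, then issue one dummy register write so prior
     * writes are flushed all the way to a discrete GPU. */
    static void device_wmb_sketch(bool is_dgfx)
    {
            __sync_synchronize(); /* wmb() stand-in: full memory fence */
            if (is_dgfx)
                    write_reg32(DUMMY_REG, 0);
    }
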
/**
if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
return;
- xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
+ xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
/*
* FIXME: We can likely do better here with our choice of
* timeout. Currently we just assume the worst case, i.e. 150us,
* scenario on current platforms if all cache entries are
* transient and need to be flushed.
*/
- if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
+ if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
150, NULL, false))
xe_gt_err_once(gt, "TD flush timeout\n");
return;
spin_lock(&gt->global_invl_lock);
- xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
+ xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
- if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+ if (xe_mmio_wait32(>->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(&gt->global_invl_lock);
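
Note the last argument in these two hunks: the TD flush waits with atomic == false (the helper may sleep), while the global invalidation passes true because its poll runs under gt->global_invl_lock, a spinlock, so it must busy-wait. A sketch of that atomic variant, again with hypothetical helpers:

    #include <stdint.h>
    #include <errno.h>

    extern void write_reg32(uint32_t reg, uint32_t val); /* hypothetical */
    extern uint32_t read_reg32(uint32_t reg);            /* hypothetical */
    extern uint64_t now_us(void);                        /* hypothetical clock */

    /* Trigger the invalidation with a write of 0x1, then spin -- no
     * sleeping allowed with the spinlock held -- until the hardware
     * clears the bit or 150us elapse. */
    static int global_inval_sketch(uint32_t reg)
    {
            const uint64_t deadline = now_us() + 150;

            write_reg32(reg, 0x1);
            while (read_reg32(reg) & 0x1) {
                    if (now_us() > deadline)
                            return -ETIMEDOUT;
            }
            return 0;
    }
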