struct ww_acquire_ctx ww;
struct xe_vm *vm = NULL;
struct xe_bo *bo;
- unsigned bo_flags = XE_BO_CREATE_USER_BIT;
+ unsigned int bo_flags = XE_BO_CREATE_USER_BIT;
u32 handle;
int err;
vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS) {
for (i = 0; i < args->num_syncs; i++) {
struct dma_fence *fence = syncs[i].fence;
+
if (fence) {
err = xe_vm_async_fence_wait_start(fence);
if (err)
struct xe_device;
struct xe_gt;
-#define xe_execlist_port_assert_held(port) lockdep_assert_held(&(port)->lock);
+#define xe_execlist_port_assert_held(port) lockdep_assert_held(&(port)->lock)
int xe_execlist_init(struct xe_gt *gt);
struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
#define for_each_hw_engine(hwe__, gt__, id__) \
for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
- for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
+ for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
xe_hw_engine_is_valid((hwe__)))
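/*
 * Hypothetical usage sketch for the iterator macro above; the function name
 * and the pr_info() body are illustrative, not taken from the driver:
 */
static void example_list_engines(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		pr_info("engine %d is valid\n", id);
}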
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int i;
BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
- BUILD_BUG_ON(SOFT_SCRATCH_COUNT != GUC_CTL_MAX_DWORDS + 2);
+ BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
struct xe_reg_sr_entry *entry;
unsigned long idx;
- unsigned count = 0;
+ unsigned int count = 0;
const struct {
struct xe_reg reg;
bool skip;
ptr = xa_store(&ct->fence_lookup,
g2h_fence.seqno,
&g2h_fence, GFP_KERNEL);
-	if (IS_ERR(ptr)) {
-		return PTR_ERR(ptr);
-	}
+	if (xa_is_err(ptr))
+		return xa_err(ptr);
goto retry_same_fence;
} else if (unlikely(ret)) {
struct guc_policies {
u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
- /* In micro seconds. How much time to allow before DPC processing is
+ /*
+	 * In microseconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
- * Typically 1000s of micro seconds (example only, not granularity). */
+	 * Typically 1000s of microseconds (example only, not granularity).
+ */
u32 dpc_promote_time;
/* Must be set to take these new values. */
u32 is_valid;
- /* Max number of WIs to process per call. A large value may keep CS
- * idle. */
+ /*
+ * Max number of WIs to process per call. A large value may keep CS
+ * idle.
+ */
u32 max_num_work_items;
u32 global_flags;
u32 data) \
{ \
XE_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
- \
+\
policy->h2g.klv[policy->count].kl = \
FIELD_PREP(GUC_KLV_0_KEY, \
GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
struct xe_gt *gt = huc_to_gt(huc);
struct xe_guc *guc = huc_to_guc(huc);
int ret;
+
if (xe_uc_fw_is_disabled(&huc->fw))
return 0;
}
static void gt_irq_handler(struct xe_tile *tile,
- u32 master_ctl, long unsigned int *intr_dw,
+ u32 master_ctl, unsigned long *intr_dw,
u32 *identity)
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_device *xe = arg;
struct xe_tile *tile = xe_device_get_root_tile(xe);
u32 master_ctl, gu_misc_iir;
- long unsigned int intr_dw[2];
+ unsigned long intr_dw[2];
u32 identity[32];
master_ctl = xelp_intr_disable(xe);
struct xe_device *xe = arg;
struct xe_tile *tile;
u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
- long unsigned int intr_dw[2];
+ unsigned long intr_dw[2];
u32 identity[32];
u8 id;
static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
- if (GRAPHICS_VERx100(xe) >= 1210) {
+ if (GRAPHICS_VERx100(xe) >= 1210)
return dg1_irq_handler;
- } else {
+ else
return xelp_irq_handler;
- }
}
static void irq_uninstall(struct drm_device *drm, void *arg)
};
static const u8 mtl_rcs_offsets[] = {
- NOP(1),
- LRI(15, POSTED),
- REG16(0x244),
- REG(0x034),
- REG(0x030),
- REG(0x038),
- REG(0x03c),
- REG(0x168),
- REG(0x140),
- REG(0x110),
- REG(0x1c0),
- REG(0x1c4),
- REG(0x1c8),
- REG(0x180),
- REG16(0x2b4),
- REG(0x120),
- REG(0x124),
-
- NOP(1),
- LRI(9, POSTED),
- REG16(0x3a8),
- REG16(0x28c),
- REG16(0x288),
- REG16(0x284),
- REG16(0x280),
- REG16(0x27c),
- REG16(0x278),
- REG16(0x274),
- REG16(0x270),
-
- NOP(2),
- LRI(2, POSTED),
- REG16(0x5a8),
- REG16(0x5ac),
-
- NOP(6),
- LRI(1, 0),
- REG(0x0c8),
-
- END
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(2),
+ LRI(2, POSTED),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
};
#undef END
#define EMIT_COPY_DW 10
static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
u64 src_ofs, u64 dst_ofs, unsigned int size,
- unsigned pitch)
+ unsigned int pitch)
{
XE_BUG_ON(size / pitch > S16_MAX);
XE_BUG_ON(pitch / 4 > S16_MAX);
do {
u64 addr = ppgtt_ofs + ofs * 8;
+
chunk = min(update->qwords, 0x1ffU);
/* Ensure populatefn can do memset64 by aligning bb->cs */
bool atomic)
{
int err;
+
lockdep_assert_held(&gt->pcode.lock);
if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
{
u32 val = entry->set_bits;
const char *access_str = "(invalid)";
- unsigned range_bit = 2;
+ unsigned int range_bit = 2;
u32 range_start, range_end;
bool deny;
cur->node = NULL;
cur->mem_type = XE_PL_TT;
XE_WARN_ON(res && start + size > res->size);
- return;
}
static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
}
struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
- unsigned size)
+ unsigned int size)
{
return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
}
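/*
 * Hypothetical caller sketch: xe_sa_bo_new() forwards to drm_suballoc_new(),
 * which reports failure via ERR_PTR(), so the IS_ERR() check below is the
 * assumed error-handling pattern.
 */
static int example_suballoc(struct xe_sa_manager *sa_manager)
{
	struct drm_suballoc *sa = xe_sa_bo_new(sa_manager, 64);

	if (IS_ERR(sa))
		return PTR_ERR(sa);

	/* ... use the suballocation ... */
	return 0;
}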
entry__, \
},
-XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, \
+XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_major_ver)
-XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, \
+XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_no_ver)
static struct xe_gt *
case XE_VM_BIND_OP_PREFETCH:
vma = xe_vm_find_overlapping_vma(vm, addr, range);
if (XE_IOCTL_DBG(xe, !vma))
- return -ENODATA; /* Not an actual error, IOCTL
- cleans up returns and 0 */
+			/* Not an actual error; the IOCTL cleans up and returns 0 */
+ return -ENODATA;
if (XE_IOCTL_DBG(xe, (xe_vma_start(vma) != addr ||
xe_vma_end(vma) != addr + range) && !async))
return -EINVAL;
break;
case XE_VM_BIND_OP_UNMAP_ALL:
if (XE_IOCTL_DBG(xe, list_empty(&bo->ttm.base.gpuva.list)))
- return -ENODATA; /* Not an actual error, IOCTL
- cleans up returns and 0 */
+			/* Not an actual error; the IOCTL cleans up and returns 0 */
+ return -ENODATA;
break;
default:
XE_BUG_ON("NOT POSSIBLE");
* the list of userptrs mapped in the VM, the list of engines using this VM, and
* the array of external BOs mapped in the VM. Adding or removing any of the
* aforementioned state requires holding this lock in write mode. The VM
- * bind path also acquires this lock in write while while the exec / compute
- * mode rebind worker acquire this lock in read mode.
+ * bind path also acquires this lock in write mode while the exec / compute
+ * mode rebind worker acquires it in read mode.
*
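* A minimal sketch of the rule above (assuming the lock is the VM's
* rw_semaphore; the userptr list name is illustrative):
*
*	down_write(&vm->lock);
*	list_add(&userptr->vm_link, &vm->userptr.list);
*	up_write(&vm->lock);
*
*	down_read(&vm->lock);
*	... walk the VM's lists without modifying them ...
*	up_read(&vm->lock);
*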
* VM dma-resv lock (vm->ttm.base.resv->lock) - WW lock. Protects VM dma-resv
* slots which is shared with any private BO in the VM. Expected to be acquired