u32 handle;
int err;
- if (XE_IOCTL_ERR(xe, args->extensions))
+ if (XE_IOCTL_ERR(xe, args->extensions) || XE_IOCTL_ERR(xe, args->pad) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
if (XE_IOCTL_ERR(xe, args->flags &
struct drm_xe_gem_mmap_offset *args = data;
struct drm_gem_object *gem_obj;
- if (XE_IOCTL_ERR(xe, args->extensions))
+ if (XE_IOCTL_ERR(xe, args->extensions) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
if (XE_IOCTL_ERR(xe, args->flags))
return -EFAULT;
if (XE_IOCTL_ERR(xe, ext.property >=
- ARRAY_SIZE(engine_set_property_funcs)))
+ ARRAY_SIZE(engine_set_property_funcs)) ||
+ XE_IOCTL_ERR(xe, ext.pad))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(engine_set_property_funcs));
if (XE_IOCTL_ERR(xe, err))
return -EFAULT;
- if (XE_IOCTL_ERR(xe, ext.name >=
+ if (XE_IOCTL_ERR(xe, ext.pad) ||
+ XE_IOCTL_ERR(xe, ext.name >=
ARRAY_SIZE(engine_user_extension_funcs)))
return -EINVAL;
int len;
int err;
- if (XE_IOCTL_ERR(xe, args->flags))
+ if (XE_IOCTL_ERR(xe, args->flags) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
len = args->width * args->num_placements;
struct drm_xe_engine_get_property *args = data;
struct xe_engine *e;
+ if (XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
mutex_lock(&xef->engine.lock);
e = xa_load(&xef->engine.xa, args->engine_id);
mutex_unlock(&xef->engine.lock);
struct drm_xe_engine_destroy *args = data;
struct xe_engine *e;
- if (XE_IOCTL_ERR(xe, args->pad))
+ if (XE_IOCTL_ERR(xe, args->pad) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
mutex_lock(&xef->engine.lock);
int ret;
u32 idx;
+ if (XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
e = xe_engine_lookup(xef, args->engine_id);
if (XE_IOCTL_ERR(xe, !e))
return -ENOENT;
bool write_locked;
int err = 0;
- if (XE_IOCTL_ERR(xe, args->extensions))
+ if (XE_IOCTL_ERR(xe, args->extensions) ||
+ XE_IOCTL_ERR(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
engine = xe_engine_lookup(xef, args->engine_id);
bool allowed;
int ret = 0;
- if (XE_IOCTL_ERR(xe, args->extensions))
+ if (XE_IOCTL_ERR(xe, args->extensions) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
if (XE_IOCTL_ERR(xe, args->flags & ~VALID_MMIO_FLAGS))
struct drm_xe_device_query *query = data;
u32 idx;
- if (XE_IOCTL_ERR(xe, query->extensions != 0))
+ if (XE_IOCTL_ERR(xe, query->extensions) ||
+ XE_IOCTL_ERR(xe, query->reserved[0] || query->reserved[1]))
return -EINVAL;
- if (XE_IOCTL_ERR(xe, query->query > ARRAY_SIZE(xe_query_funcs)))
+ if (XE_IOCTL_ERR(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
- return -EFAULT;
+ return -EINVAL;
if (XE_IOCTL_ERR(xe, sync_in.flags &
- ~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_SIGNAL)))
+ ~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_SIGNAL)) ||
+ XE_IOCTL_ERR(xe, sync_in.pad) ||
+ XE_IOCTL_ERR(xe, sync_in.reserved[0] || sync_in.reserved[1]))
return -EINVAL;
signal = sync_in.flags & DRM_XE_SYNC_SIGNAL;
return -EFAULT;
if (XE_IOCTL_ERR(xe, ext.property >=
- ARRAY_SIZE(vm_set_property_funcs)))
+ ARRAY_SIZE(vm_set_property_funcs)) ||
+ XE_IOCTL_ERR(xe, ext.pad) ||
+ XE_IOCTL_ERR(xe, ext.reserved[0] || ext.reserved[1]))
return -EINVAL;
- return vm_set_property_funcs[ext.property](xe, vm, ext.value);
+ return vm_set_property_funcs[array_index_nospec(ext.property,
+ ARRAY_SIZE(vm_set_property_funcs))](xe, vm, ext.value);
if (XE_IOCTL_ERR(xe, err))
return -EFAULT;
- if (XE_IOCTL_ERR(xe, ext.name >=
+ if (XE_IOCTL_ERR(xe, ext.pad) ||
+ XE_IOCTL_ERR(xe, ext.name >=
ARRAY_SIZE(vm_user_extension_funcs)))
return -EINVAL;
int err;
u32 flags = 0;
+ if (XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
return -EINVAL;
struct drm_xe_vm_destroy *args = data;
struct xe_vm *vm;
- if (XE_IOCTL_ERR(xe, args->pad))
+ if (XE_IOCTL_ERR(xe, args->pad) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
vm = xe_vm_lookup(xef, args->vm_id);
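
For reference, the checks above assume the drm_xe uAPI convention of an explicit must-be-zero pad word plus a trailing reserved area on each args struct. An abridged sketch of the destroy struct being validated here (illustrative, not the authoritative uapi header):

/* Abridged sketch of the uAPI layout assumed by the pad/reserved checks. */
struct drm_xe_vm_destroy {
	/** @vm_id: VM ID to destroy */
	__u32 vm_id;
	/** @pad: MBZ (must be zero) */
	__u32 pad;
	/** @reserved: MBZ, reserved for future use */
	__u64 reserved[2];
};
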
int i;
if (XE_IOCTL_ERR(xe, args->extensions) ||
+ XE_IOCTL_ERR(xe, args->pad || args->pad2) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]) ||
XE_IOCTL_ERR(xe, !args->num_binds) ||
XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
return -EINVAL;
u64 obj_offset = (*bind_ops)[i].obj_offset;
u32 region = (*bind_ops)[i].region;
+ if (XE_IOCTL_ERR(xe, (*bind_ops)[i].pad) ||
+ XE_IOCTL_ERR(xe, (*bind_ops)[i].reserved[0] ||
+ (*bind_ops)[i].reserved[1])) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+
if (i == 0) {
*async = !!(op & XE_VM_BIND_FLAG_ASYNC);
} else if (XE_IOCTL_ERR(xe, !*async) ||
struct xe_vma **vmas = NULL;
int num_vmas = 0, err = 0, idx;
- if (XE_IOCTL_ERR(xe, args->extensions))
+ if (XE_IOCTL_ERR(xe, args->extensions) ||
+ XE_IOCTL_ERR(xe, args->pad || args->pad2) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
- if (XE_IOCTL_ERR(xe, args->property > ARRAY_SIZE(madvise_funcs)))
+ if (XE_IOCTL_ERR(xe, args->property >= ARRAY_SIZE(madvise_funcs)))
args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR;
unsigned long timeout = args->timeout;
- if (XE_IOCTL_ERR(xe, args->extensions))
+ if (XE_IOCTL_ERR(xe, args->extensions) || XE_IOCTL_ERR(xe, args->pad) ||
+ XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
if (XE_IOCTL_ERR(xe, args->flags & ~VALID_FLAGS))
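
Every entry point above now repeats the same reserved[0]/reserved[1] test. A minimal sketch of how it could be factored out, assuming the uAPI structs keep a common __u64 reserved[2] trailer (the helper name is hypothetical and not part of this patch):

/*
 * Hypothetical helper, illustrative only: returns true if any
 * must-be-zero reserved word is set by userspace.
 */
static inline bool xe_reserved_set(const __u64 reserved[2])
{
	return reserved[0] || reserved[1];
}

Each check would then read XE_IOCTL_ERR(xe, xe_reserved_set(args->reserved)), keeping the -EINVAL behaviour identical while making the check harder to forget in future IOCTLs.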