drm/xe/uapi: Kill DRM_XE_UFENCE_WAIT_VM_ERROR
author     Matthew Brost <matthew.brost@intel.com>    Thu, 14 Sep 2023 20:40:49 +0000 (13:40 -0700)
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>      Thu, 21 Dec 2023 16:43:17 +0000 (11:43 -0500)
This flag is not used, nor does it align with the VM async document; kill it.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm_types.h
drivers/gpu/drm/xe/xe_wait_user_fence.c
include/uapi/drm/xe_drm.h

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c7e3b1f..3132114 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1621,9 +1621,6 @@ void xe_vm_close_and_put(struct xe_vm *vm)
                xe_vma_destroy_unlocked(vma);
        }
 
-       if (vm->async_ops.error_capture.addr)
-               wake_up_all(&vm->async_ops.error_capture.wq);
-
        xe_assert(xe, list_empty(&vm->extobj.list));
        up_write(&vm->lock);
 
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 9a1075a..828ed0f 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -215,17 +215,6 @@ struct xe_vm {
                struct work_struct work;
                /** @lock: protects list of pending async VM ops and fences */
                spinlock_t lock;
-               /** @error_capture: error capture state */
-               struct {
-                       /** @mm: user MM */
-                       struct mm_struct *mm;
-                       /**
-                        * @addr: user pointer to copy error capture state too
-                        */
-                       u64 addr;
-                       /** @wq: user fence wait queue for VM errors */
-                       wait_queue_head_t wq;
-               } error_capture;
                /** @fence: fence state */
                struct {
                        /** @context: context of async fence */
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index 3ac4cd2..7868690 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -13,7 +13,6 @@
 #include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_macros.h"
-#include "xe_vm.h"
 
 static int do_compare(u64 addr, u64 value, u64 mask, u16 op)
 {
@@ -81,8 +80,7 @@ static int check_hw_engines(struct xe_device *xe,
 }
 
 #define VALID_FLAGS    (DRM_XE_UFENCE_WAIT_SOFT_OP | \
-                        DRM_XE_UFENCE_WAIT_ABSTIME | \
-                        DRM_XE_UFENCE_WAIT_VM_ERROR)
+                        DRM_XE_UFENCE_WAIT_ABSTIME)
 #define MAX_OP         DRM_XE_UFENCE_WAIT_LTE
 
 static long to_jiffies_timeout(struct xe_device *xe,
@@ -137,11 +135,9 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
        struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
        struct drm_xe_engine_class_instance __user *user_eci =
                u64_to_user_ptr(args->instances);
-       struct xe_vm *vm = NULL;
        u64 addr = args->addr;
        int err;
-       bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_SOFT_OP ||
-               args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR;
+       bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_SOFT_OP;
        long timeout;
        ktime_t start;
 
@@ -162,8 +158,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
        if (XE_IOCTL_DBG(xe, !no_engines && !args->num_engines))
                return -EINVAL;
 
-       if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) &&
-                        addr & 0x7))
+       if (XE_IOCTL_DBG(xe, addr & 0x7))
                return -EINVAL;
 
        if (XE_IOCTL_DBG(xe, args->num_engines > XE_HW_ENGINE_MAX_INSTANCE))
@@ -181,22 +176,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
                        return -EINVAL;
        }
 
-       if (args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) {
-               if (XE_IOCTL_DBG(xe, args->vm_id >> 32))
-                       return -EINVAL;
-
-               vm = xe_vm_lookup(to_xe_file(file), args->vm_id);
-               if (XE_IOCTL_DBG(xe, !vm))
-                       return -ENOENT;
-
-               if (XE_IOCTL_DBG(xe, !vm->async_ops.error_capture.addr)) {
-                       xe_vm_put(vm);
-                       return -EOPNOTSUPP;
-               }
-
-               addr = vm->async_ops.error_capture.addr;
-       }
-
        timeout = to_jiffies_timeout(xe, args);
 
        start = ktime_get();
@@ -207,15 +186,8 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
         * hardware engine. Open coding as 'do_compare' can sleep which doesn't
         * work with the wait_event_* macros.
         */
-       if (vm)
-               add_wait_queue(&vm->async_ops.error_capture.wq, &w_wait);
-       else
-               add_wait_queue(&xe->ufence_wq, &w_wait);
+       add_wait_queue(&xe->ufence_wq, &w_wait);
        for (;;) {
-               if (vm && xe_vm_is_closed(vm)) {
-                       err = -ENODEV;
-                       break;
-               }
                err = do_compare(addr, args->value, args->mask, args->op);
                if (err <= 0)
                        break;
@@ -232,12 +204,7 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
 
                timeout = wait_woken(&w_wait, TASK_INTERRUPTIBLE, timeout);
        }
-       if (vm) {
-               remove_wait_queue(&vm->async_ops.error_capture.wq, &w_wait);
-               xe_vm_put(vm);
-       } else {
-               remove_wait_queue(&xe->ufence_wq, &w_wait);
-       }
+       remove_wait_queue(&xe->ufence_wq, &w_wait);
 
        if (!(args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)) {
                args->timeout -= ktime_to_ns(ktime_sub(ktime_get(), start));
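
[Editor's note] The comment in the hunk above explains why the wait is open coded: do_compare() copies from a user pointer and can sleep, and the wait_event_*() macros set the task state before evaluating their condition, so a condition that can sleep does not work there. Below is a minimal sketch of the woken-wait pattern the ioctl uses; only the add_wait_queue()/do_compare()/wait_woken()/remove_wait_queue() skeleton is taken from the diff, while the signal and timeout handling (and the error codes) are illustrative assumptions, not copied from this patch.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

/*
 * Sketch only: @wq is the wait queue woken when user fences may have
 * signalled, @timeout is in jiffies, and check_cond() stands in for
 * do_compare() -- it may sleep, returns 0 when the value matches,
 * a negative errno on fault, or 1 if the value does not yet match.
 */
static int woken_wait_sketch(wait_queue_head_t *wq, long timeout,
			     int (*check_cond)(void *arg), void *arg)
{
	DEFINE_WAIT_FUNC(w_wait, woken_wake_function);
	int err;

	add_wait_queue(wq, &w_wait);
	for (;;) {
		/* May sleep, so it cannot live inside wait_event_*() */
		err = check_cond(arg);
		if (err <= 0)
			break;			/* matched (0) or faulted (<0) */

		if (signal_pending(current)) {	/* assumed handling */
			err = -ERESTARTSYS;
			break;
		}

		if (!timeout) {			/* assumed handling */
			err = -ETIME;
			break;
		}

		/* Sleep until woken via @wq or the remaining timeout decays */
		timeout = wait_woken(&w_wait, TASK_INTERRUPTIBLE, timeout);
	}
	remove_wait_queue(wq, &w_wait);

	return err;
}
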
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index e7cf42c..f13974f 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -905,18 +905,10 @@ struct drm_xe_wait_user_fence {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;
 
-       union {
-               /**
-                * @addr: user pointer address to wait on, must qword aligned
-                */
-               __u64 addr;
-
-               /**
-                * @vm_id: The ID of the VM which encounter an error used with
-                * DRM_XE_UFENCE_WAIT_VM_ERROR. Upper 32 bits must be clear.
-                */
-               __u64 vm_id;
-       };
+       /**
+        * @addr: user pointer address to wait on, must qword aligned
+        */
+       __u64 addr;
 
 #define DRM_XE_UFENCE_WAIT_EQ  0
 #define DRM_XE_UFENCE_WAIT_NEQ 1
@@ -929,7 +921,6 @@ struct drm_xe_wait_user_fence {
 
 #define DRM_XE_UFENCE_WAIT_SOFT_OP     (1 << 0)        /* e.g. Wait on VM bind */
 #define DRM_XE_UFENCE_WAIT_ABSTIME     (1 << 1)
-#define DRM_XE_UFENCE_WAIT_VM_ERROR    (1 << 2)
        /** @flags: wait flags */
        __u16 flags;
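
[Editor's note] With the vm_id union member and DRM_XE_UFENCE_WAIT_VM_ERROR gone, userspace always passes the qword-aligned user pointer in @addr; only DRM_XE_UFENCE_WAIT_SOFT_OP and DRM_XE_UFENCE_WAIT_ABSTIME remain as flags. A wait on a soft op (e.g. a VM bind fence) might look like the sketch below. This is an illustrative sketch against this era's xe_drm.h, not code from the patch: the header install path, the full-mask value and the error handling are assumptions.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>

#include <drm/xe_drm.h>	/* uapi header from this tree (assumed install path) */

/*
 * Wait until *fence == value or timeout_ns elapses. fence must be
 * 8-byte aligned; with DRM_XE_UFENCE_WAIT_SOFT_OP no engine instances
 * are passed (num_engines/instances stay zero).
 */
static int xe_wait_soft_fence(int fd, const uint64_t *fence,
			      uint64_t value, int64_t timeout_ns)
{
	struct drm_xe_wait_user_fence wait = {
		.addr = (uintptr_t)fence,		/* qword-aligned user pointer */
		.op = DRM_XE_UFENCE_WAIT_EQ,		/* wake when *fence == value */
		.flags = DRM_XE_UFENCE_WAIT_SOFT_OP,	/* e.g. wait on VM bind */
		.value = value,
		.mask = ~0ull,				/* compare the full 64 bits */
		.timeout = timeout_ns,			/* relative; ABSTIME not set */
	};

	if (ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait))
		return -errno;

	return 0;
}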