accel/ivpu: Set command queue management capability based on HWS
author: Karol Wachowski <karol.wachowski@intel.com>
Tue, 7 Jan 2025 17:32:32 +0000 (18:32 +0100)
committer: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Thu, 9 Jan 2025 08:35:44 +0000 (09:35 +0100)
Control explicit command queue management capability bit based on
scheduling mode. Capability will be available only when hardware
scheduling mode is set.

There is no point in allowing user space to create and destroy command
queues with OS scheduling mode because FW does not support all required
functionalities for correct command queue management with OS scheduling.

Return -ENODEV from command queue create/destroy/submit IOCTLs.

Remove the is_valid field from struct ivpu_cmdq.

Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-10-maciej.falkowski@linux.intel.com
drivers/accel/ivpu/ivpu_drv.c
drivers/accel/ivpu/ivpu_drv.h
drivers/accel/ivpu/ivpu_job.c
drivers/accel/ivpu/ivpu_job.h

index 9b0d998..6a80d62 100644 (file)
@@ -127,23 +127,18 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
        kref_put(&file_priv->ref, file_priv_release);
 }
 
-static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
 {
-       switch (args->index) {
+       switch (capability) {
        case DRM_IVPU_CAP_METRIC_STREAMER:
-               args->value = 1;
-               break;
+               return true;
        case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
-               args->value = 1;
-               break;
+               return true;
        case DRM_IVPU_CAP_MANAGE_CMDQ:
-               args->value = 1;
-               break;
+               return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
        default:
-               return -EINVAL;
+               return false;
        }
-
-       return 0;
 }
 
 static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -203,7 +198,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
                args->value = vdev->hw->sku;
                break;
        case DRM_IVPU_PARAM_CAPABILITIES:
-               ret = ivpu_get_capabilities(vdev, args);
+               args->value = ivpu_is_capable(vdev, args->index);
                break;
        default:
                ret = -EINVAL;
index b57d878..d53902b 100644 (file)
@@ -213,6 +213,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
 int ivpu_boot(struct ivpu_device *vdev);
 int ivpu_shutdown(struct ivpu_device *vdev);
 void ivpu_prepare_for_reset(struct ivpu_device *vdev);
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability);
 
 static inline u8 ivpu_revision(struct ivpu_device *vdev)
 {
index c55de97..37ea92e 100644 (file)
@@ -123,7 +123,6 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
 
        cmdq->priority = priority;
        cmdq->is_legacy = is_legacy;
-       cmdq->is_valid = true;
 
        ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
                              &file_priv->cmdq_id_next, GFP_KERNEL);
@@ -307,7 +306,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32
        lockdep_assert_held(&file_priv->lock);
 
        cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
-       if (!cmdq || !cmdq->is_valid) {
+       if (!cmdq) {
                ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
                return NULL;
        }
@@ -832,6 +831,9 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct drm_ivpu_cmdq_submit *args = data;
 
+       if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+               return -ENODEV;
+
        if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
                return -EINVAL;
 
@@ -857,6 +859,9 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
        struct drm_ivpu_cmdq_create *args = data;
        struct ivpu_cmdq *cmdq;
 
+       if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+               return -ENODEV;
+
        if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
                return -EINVAL;
 
@@ -880,24 +885,17 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
        u32 cmdq_id;
        int ret = 0;
 
+       if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+               return -ENODEV;
+
        mutex_lock(&file_priv->lock);
 
        cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
-       if (!cmdq || !cmdq->is_valid || cmdq->is_legacy) {
+       if (!cmdq || cmdq->is_legacy) {
                ret = -ENOENT;
                goto unlock;
        }
 
-       /*
-        * There is no way to stop executing jobs per command queue
-        * in OS scheduling mode, mark command queue as invalid instead
-        * and it will be freed together with context release.
-        */
-       if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS) {
-               cmdq->is_valid = false;
-               goto unlock;
-       }
-
        cmdq_id = cmdq->id;
        ivpu_cmdq_destroy(file_priv, cmdq);
        ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
index ff77ee1..2e301c2 100644 (file)
@@ -31,7 +31,6 @@ struct ivpu_cmdq {
        u32 id;
        u32 db_id;
        u8 priority;
-       bool is_valid;
        bool is_legacy;
 };