#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
+#include "xe_hw_engine_group.h"
#include "xe_macros.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
bool write_locked, skip_retry = false;
ktime_t end = 0;
int err = 0;
+ struct xe_hw_engine_group *group;
+ enum xe_hw_engine_group_execution_mode mode, previous_mode;
	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;
}
}
+ group = q->hwe->hw_engine_group;
+ mode = xe_hw_engine_group_find_exec_mode(q);
+
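+	/*
+	 * A dma-fence submission must not run concurrently with faulting LR
+	 * jobs on the same hw engine group, so take the group in dma-fence
+	 * mode up front; the reference is dropped at err_hw_exec_mode.
+	 */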
+ if (mode == EXEC_MODE_DMA_FENCE) {
+ err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
+ if (err)
+ goto err_syncs;
+ }
+
retry:
	if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
		err = down_write_killable(&vm->lock);
		write_locked = true;
	} else {
		err = down_read_interruptible(&vm->lock);
		write_locked = false;
	}
	if (err)
-		goto err_syncs;
+		goto err_hw_exec_mode;
	spin_lock(&xe->ttm.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&xe->ttm.lru_lock);
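+	/*
+	 * If this was an LR submission, kick the group's resume worker so any
+	 * faulting LR jobs suspended for a dma-fence submission run again.
+	 */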
+ if (mode == EXEC_MODE_LR)
+ xe_hw_engine_group_resume_faulting_lr_jobs(group);
+
err_repin:
if (!xe_vm_in_lr_mode(vm))
up_read(&vm->userptr.notifier_lock);
up_read(&vm->lock);
if (err == -EAGAIN && !skip_retry)
goto retry;
+err_hw_exec_mode:
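+	/* Pairs with xe_hw_engine_group_get_mode() above; also reached on success by fall-through */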
+ if (mode == EXEC_MODE_DMA_FENCE)
+ xe_hw_engine_group_put(group);
err_syncs:
while (num_syncs--)
xe_sync_entry_cleanup(&syncs[num_syncs]);
static void hw_engine_group_free(struct drm_device *drm, void *arg)
{
	struct xe_hw_engine_group *group = arg;
+ destroy_workqueue(group->resume_wq);
kfree(group);
}
+static void
+hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
+{
+ struct xe_exec_queue *q;
+ struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work);
+ int err;
+ enum xe_hw_engine_group_execution_mode previous_mode;
+
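+	/*
+	 * Take the group in LR mode; if it was already in LR mode, nothing
+	 * was suspended and only the reference needs to be dropped.
+	 */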
+ err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
+ if (err)
+ return;
+
+ if (previous_mode == EXEC_MODE_LR)
+ goto put;
+
+ list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ if (!xe_vm_in_fault_mode(q->vm))
+ continue;
+
+ q->ops->resume(q);
+ }
+
+put:
+ xe_hw_engine_group_put(group);
+}
+
static struct xe_hw_engine_group *
hw_engine_group_alloc(struct xe_device *xe)
{
if (!group)
return ERR_PTR(-ENOMEM);
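+	/* Dedicated workqueue for resuming the group's faulting LR jobs */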
+	group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0);
+	if (!group->resume_wq) {
+		kfree(group);
+		return ERR_PTR(-ENOMEM);
+	}
+
init_rwsem(&group->mode_sem);
+ INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func);
INIT_LIST_HEAD(&group->exec_queue_list);
	err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group);
	if (err)
		return ERR_PTR(err);
		err = q->ops->suspend_wait(q);
		if (err)
			goto err_suspend;
-		queue_work(group->resume_wq, &group->resume_work);
+		xe_hw_engine_group_resume_faulting_lr_jobs(group);
	}
list_add(&q->hw_engine_group_link, &group->exec_queue_list);
up_write(&group->mode_sem);
}
+/**
+ * xe_hw_engine_group_resume_faulting_lr_jobs() - Asynchronously resume the hw engine group's
+ * faulting LR jobs
+ * @group: The hw engine group
+ */
+void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group)
+{
+ queue_work(group->resume_wq, &group->resume_work);
+}
+
/**
* xe_hw_engine_group_suspend_faulting_lr_jobs() - Suspend the faulting LR jobs of this group
 * @group: The hw engine group
 */
static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
{
int err;
struct xe_exec_queue *q;
+ bool need_resume = false;
lockdep_assert_held_write(&group->mode_sem);
	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		if (!xe_vm_in_fault_mode(q->vm))
continue;
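+		/* At least one fault-mode queue is being suspended: schedule an async resume */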
+ need_resume = true;
q->ops->suspend(q);
}
goto err_suspend;
}
+ if (need_resume)
+ xe_hw_engine_group_resume_faulting_lr_jobs(group);
+
return 0;
err_suspend:

void xe_hw_engine_group_put(struct xe_hw_engine_group *group)
{
	up_read(&group->mode_sem);
}
+
+/**
+ * xe_hw_engine_group_find_exec_mode() - Find the execution mode for this exec queue
+ * @q: The exec_queue
+ *
+ * Return: EXEC_MODE_LR if the exec queue's VM is in fault mode,
+ * EXEC_MODE_DMA_FENCE otherwise
+ */
+enum xe_hw_engine_group_execution_mode
+xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q)
+{
+ if (xe_vm_in_fault_mode(q->vm))
+ return EXEC_MODE_LR;
+ else
+ return EXEC_MODE_DMA_FENCE;
+}
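
/*
 * Usage sketch (illustration only, mirroring the exec ioctl above): a
 * dma-fence submission holds the group across the submission, while an LR
 * submission only kicks the asynchronous resume worker:
 *
 *	mode = xe_hw_engine_group_find_exec_mode(q);
 *	if (mode == EXEC_MODE_DMA_FENCE)
 *		err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
 *	...submit the job...
 *	if (mode == EXEC_MODE_LR)
 *		xe_hw_engine_group_resume_faulting_lr_jobs(group);
 *	else
 *		xe_hw_engine_group_put(group);
 */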