drm/xe: Introduce xe_engine_is_idle()
drivers/gpu/drm/xe/xe_engine.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5
6 #include "xe_engine.h"
7
8 #include <linux/nospec.h>
9
10 #include <drm/drm_device.h>
11 #include <drm/drm_file.h>
12 #include <drm/xe_drm.h>
13
14 #include "xe_device.h"
15 #include "xe_gt.h"
16 #include "xe_hw_fence.h"
17 #include "xe_lrc.h"
18 #include "xe_macros.h"
19 #include "xe_migrate.h"
20 #include "xe_pm.h"
21 #include "xe_trace.h"
22 #include "xe_vm.h"
23
24 static struct xe_engine *__xe_engine_create(struct xe_device *xe,
25                                             struct xe_vm *vm,
26                                             u32 logical_mask,
27                                             u16 width, struct xe_hw_engine *hwe,
28                                             u32 flags)
29 {
30         struct xe_engine *e;
31         struct xe_gt *gt = hwe->gt;
32         int err;
33         int i;
34
35         e = kzalloc(sizeof(*e) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
36         if (!e)
37                 return ERR_PTR(-ENOMEM);
38
39         kref_init(&e->refcount);
40         e->flags = flags;
41         e->hwe = hwe;
42         e->gt = gt;
43         if (vm)
44                 e->vm = xe_vm_get(vm);
45         e->class = hwe->class;
46         e->width = width;
47         e->logical_mask = logical_mask;
48         e->fence_irq = &gt->fence_irq[hwe->class];
49         e->ring_ops = gt->ring_ops[hwe->class];
50         e->ops = gt->engine_ops;
51         INIT_LIST_HEAD(&e->persistent.link);
52         INIT_LIST_HEAD(&e->compute.link);
53         INIT_LIST_HEAD(&e->multi_gt_link);
54
55         /* FIXME: Wire up to configurable default value */
56         e->sched_props.timeslice_us = 1 * 1000;
57         e->sched_props.preempt_timeout_us = 640 * 1000;
58
59         if (xe_engine_is_parallel(e)) {
60                 e->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
61                 e->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
62         }
63         if (e->flags & ENGINE_FLAG_VM) {
64                 e->bind.fence_ctx = dma_fence_context_alloc(1);
65                 e->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
66         }
67
68         for (i = 0; i < width; ++i) {
69                 err = xe_lrc_init(e->lrc + i, hwe, e, vm, SZ_16K);
70                 if (err)
71                         goto err_lrc;
72         }
73
74         err = e->ops->init(e);
75         if (err)
76                 goto err_lrc;
77
78         return e;
79
80 err_lrc:
81         for (i = i - 1; i >= 0; --i)
82                 xe_lrc_finish(e->lrc + i);
83         kfree(e);
84         return ERR_PTR(err);
85 }
86
87 struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
88                                    u32 logical_mask, u16 width,
89                                    struct xe_hw_engine *hwe, u32 flags)
90 {
91         struct ww_acquire_ctx ww;
92         struct xe_engine *e;
93         int err;
94
95         if (vm) {
96                 err = xe_vm_lock(vm, &ww, 0, true);
97                 if (err)
98                         return ERR_PTR(err);
99         }
100         e = __xe_engine_create(xe, vm, logical_mask, width, hwe, flags);
101         if (vm)
102                 xe_vm_unlock(vm, &ww);
103
104         return e;
105 }
106
107 struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
108                                          struct xe_vm *vm,
109                                          enum xe_engine_class class, u32 flags)
110 {
111         struct xe_hw_engine *hwe, *hwe0 = NULL;
112         enum xe_hw_engine_id id;
113         u32 logical_mask = 0;
114
115         for_each_hw_engine(hwe, gt, id) {
116                 if (xe_hw_engine_is_reserved(hwe))
117                         continue;
118
119                 if (hwe->class == class) {
120                         logical_mask |= BIT(hwe->logical_instance);
121                         if (!hwe0)
122                                 hwe0 = hwe;
123                 }
124         }
125
126         if (!logical_mask)
127                 return ERR_PTR(-ENODEV);
128
129         return xe_engine_create(xe, vm, logical_mask, 1, hwe0, flags);
130 }
131
132 void xe_engine_destroy(struct kref *ref)
133 {
134         struct xe_engine *e = container_of(ref, struct xe_engine, refcount);
135         struct xe_engine *engine, *next;
136
137         if (!(e->flags & ENGINE_FLAG_BIND_ENGINE_CHILD)) {
138                 list_for_each_entry_safe(engine, next, &e->multi_gt_list,
139                                          multi_gt_link)
140                         xe_engine_put(engine);
141         }
142
143         e->ops->fini(e);
144 }
145
146 void xe_engine_fini(struct xe_engine *e)
147 {
148         int i;
149
150         for (i = 0; i < e->width; ++i)
151                 xe_lrc_finish(e->lrc + i);
152         if (e->vm)
153                 xe_vm_put(e->vm);
154
155         kfree(e);
156 }
157
158 struct xe_engine *xe_engine_lookup(struct xe_file *xef, u32 id)
159 {
160         struct xe_engine *e;
161
162         mutex_lock(&xef->engine.lock);
163         e = xa_load(&xef->engine.xa, id);
164         /* Take the ref under the lock to guard against a concurrent destroy */
165         if (e)
166                 xe_engine_get(e);
167         mutex_unlock(&xef->engine.lock);
168
169         return e;
170 }
171
172 static int engine_set_priority(struct xe_device *xe, struct xe_engine *e,
173                                u64 value, bool create)
174 {
175         if (XE_IOCTL_ERR(xe, value > XE_ENGINE_PRIORITY_HIGH))
176                 return -EINVAL;
177
178         if (XE_IOCTL_ERR(xe, value == XE_ENGINE_PRIORITY_HIGH &&
179                          !capable(CAP_SYS_NICE)))
180                 return -EPERM;
181
182         return e->ops->set_priority(e, value);
183 }
184
185 static int engine_set_timeslice(struct xe_device *xe, struct xe_engine *e,
186                                 u64 value, bool create)
187 {
188         if (!capable(CAP_SYS_NICE))
189                 return -EPERM;
190
191         return e->ops->set_timeslice(e, value);
192 }
193
194 static int engine_set_preemption_timeout(struct xe_device *xe,
195                                          struct xe_engine *e, u64 value,
196                                          bool create)
197 {
198         if (!capable(CAP_SYS_NICE))
199                 return -EPERM;
200
201         return e->ops->set_preempt_timeout(e, value);
202 }
203
204 static int engine_set_compute_mode(struct xe_device *xe, struct xe_engine *e,
205                                    u64 value, bool create)
206 {
207         if (XE_IOCTL_ERR(xe, !create))
208                 return -EINVAL;
209
210         if (XE_IOCTL_ERR(xe, e->flags & ENGINE_FLAG_COMPUTE_MODE))
211                 return -EINVAL;
212
213         if (XE_IOCTL_ERR(xe, e->flags & ENGINE_FLAG_VM))
214                 return -EINVAL;
215
216         if (value) {
217                 struct xe_vm *vm = e->vm;
218                 int err;
219
220                 if (XE_IOCTL_ERR(xe, xe_vm_in_fault_mode(vm)))
221                         return -EOPNOTSUPP;
222
223                 if (XE_IOCTL_ERR(xe, !xe_vm_in_compute_mode(vm)))
224                         return -EOPNOTSUPP;
225
226                 if (XE_IOCTL_ERR(xe, e->width != 1))
227                         return -EINVAL;
228
229                 e->compute.context = dma_fence_context_alloc(1);
230                 spin_lock_init(&e->compute.lock);
231
232                 err = xe_vm_add_compute_engine(vm, e);
233                 if (XE_IOCTL_ERR(xe, err))
234                         return err;
235
236                 e->flags |= ENGINE_FLAG_COMPUTE_MODE;
237                 e->flags &= ~ENGINE_FLAG_PERSISTENT;
238         }
239
240         return 0;
241 }
242
243 static int engine_set_persistence(struct xe_device *xe, struct xe_engine *e,
244                                   u64 value, bool create)
245 {
246         if (XE_IOCTL_ERR(xe, !create))
247                 return -EINVAL;
248
249         if (XE_IOCTL_ERR(xe, e->flags & ENGINE_FLAG_COMPUTE_MODE))
250                 return -EINVAL;
251
252         if (value)
253                 e->flags |= ENGINE_FLAG_PERSISTENT;
254         else
255                 e->flags &= ~ENGINE_FLAG_PERSISTENT;
256
257         return 0;
258 }
259
260 static int engine_set_job_timeout(struct xe_device *xe, struct xe_engine *e,
261                                   u64 value, bool create)
262 {
263         if (XE_IOCTL_ERR(xe, !create))
264                 return -EINVAL;
265
266         if (!capable(CAP_SYS_NICE))
267                 return -EPERM;
268
269         return e->ops->set_job_timeout(e, value);
270 }
271
272 static int engine_set_acc_trigger(struct xe_device *xe, struct xe_engine *e,
273                                   u64 value, bool create)
274 {
275         if (XE_IOCTL_ERR(xe, !create))
276                 return -EINVAL;
277
278         if (XE_IOCTL_ERR(xe, !xe->info.supports_usm))
279                 return -EINVAL;
280
281         e->usm.acc_trigger = value;
282
283         return 0;
284 }
285
286 static int engine_set_acc_notify(struct xe_device *xe, struct xe_engine *e,
287                                  u64 value, bool create)
288 {
289         if (XE_IOCTL_ERR(xe, !create))
290                 return -EINVAL;
291
292         if (XE_IOCTL_ERR(xe, !xe->info.supports_usm))
293                 return -EINVAL;
294
295         e->usm.acc_notify = value;
296
297         return 0;
298 }
299
300 static int engine_set_acc_granularity(struct xe_device *xe, struct xe_engine *e,
301                                       u64 value, bool create)
302 {
303         if (XE_IOCTL_ERR(xe, !create))
304                 return -EINVAL;
305
306         if (XE_IOCTL_ERR(xe, !xe->info.supports_usm))
307                 return -EINVAL;
308
309         e->usm.acc_granularity = value;
310
311         return 0;
312 }
313
314 typedef int (*xe_engine_set_property_fn)(struct xe_device *xe,
315                                          struct xe_engine *e,
316                                          u64 value, bool create);
317
318 static const xe_engine_set_property_fn engine_set_property_funcs[] = {
319         [XE_ENGINE_SET_PROPERTY_PRIORITY] = engine_set_priority,
320         [XE_ENGINE_SET_PROPERTY_TIMESLICE] = engine_set_timeslice,
321         [XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT] = engine_set_preemption_timeout,
322         [XE_ENGINE_SET_PROPERTY_COMPUTE_MODE] = engine_set_compute_mode,
323         [XE_ENGINE_SET_PROPERTY_PERSISTENCE] = engine_set_persistence,
324         [XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT] = engine_set_job_timeout,
325         [XE_ENGINE_SET_PROPERTY_ACC_TRIGGER] = engine_set_acc_trigger,
326         [XE_ENGINE_SET_PROPERTY_ACC_NOTIFY] = engine_set_acc_notify,
327         [XE_ENGINE_SET_PROPERTY_ACC_GRANULARITY] = engine_set_acc_granularity,
328 };
329
330 static int engine_user_ext_set_property(struct xe_device *xe,
331                                         struct xe_engine *e,
332                                         u64 extension,
333                                         bool create)
334 {
335         u64 __user *address = u64_to_user_ptr(extension);
336         struct drm_xe_ext_engine_set_property ext;
337         int err;
338         u32 idx;
339
340         err = copy_from_user(&ext, address, sizeof(ext));
341         if (XE_IOCTL_ERR(xe, err))
342                 return -EFAULT;
343
344         if (XE_IOCTL_ERR(xe, ext.property >=
345                          ARRAY_SIZE(engine_set_property_funcs)))
346                 return -EINVAL;
347
348         idx = array_index_nospec(ext.property, ARRAY_SIZE(engine_set_property_funcs));
349         return engine_set_property_funcs[idx](xe, e, ext.value,  create);
350 }
351
352 typedef int (*xe_engine_user_extension_fn)(struct xe_device *xe,
353                                            struct xe_engine *e,
354                                            u64 extension,
355                                            bool create);
356
357 static const xe_engine_user_extension_fn engine_user_extension_funcs[] = {
358         [XE_ENGINE_EXTENSION_SET_PROPERTY] = engine_user_ext_set_property,
359 };
360
361 #define MAX_USER_EXTENSIONS     16
362 static int engine_user_extensions(struct xe_device *xe, struct xe_engine *e,
363                                   u64 extensions, int ext_number, bool create)
364 {
365         u64 __user *address = u64_to_user_ptr(extensions);
366         struct xe_user_extension ext;
367         int err;
368         u32 idx;
369
370         if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
371                 return -E2BIG;
372
373         err = copy_from_user(&ext, address, sizeof(ext));
374         if (XE_IOCTL_ERR(xe, err))
375                 return -EFAULT;
376
377         if (XE_IOCTL_ERR(xe, ext.name >=
378                          ARRAY_SIZE(engine_user_extension_funcs)))
379                 return -EINVAL;
380
381         idx = array_index_nospec(ext.name,
382                                  ARRAY_SIZE(engine_user_extension_funcs));
383         err = engine_user_extension_funcs[idx](xe, e, extensions, create);
384         if (XE_IOCTL_ERR(xe, err))
385                 return err;
386
387         if (ext.next_extension)
388                 return engine_user_extensions(xe, e, ext.next_extension,
389                                               ++ext_number, create);
390
391         return 0;
392 }
393
394 static const enum xe_engine_class user_to_xe_engine_class[] = {
395         [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
396         [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
397         [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
398         [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
399         [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
400 };
401
402 static struct xe_hw_engine *
403 find_hw_engine(struct xe_device *xe,
404                struct drm_xe_engine_class_instance eci)
405 {
406         u32 idx;
407
408         if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
409                 return NULL;
410
411         if (eci.gt_id >= xe->info.tile_count)
412                 return NULL;
413
414         idx = array_index_nospec(eci.engine_class,
415                                  ARRAY_SIZE(user_to_xe_engine_class));
416
417         return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
418                                user_to_xe_engine_class[idx],
419                                eci.engine_instance, true);
420 }
421
422 static u32 bind_engine_logical_mask(struct xe_device *xe, struct xe_gt *gt,
423                                     struct drm_xe_engine_class_instance *eci,
424                                     u16 width, u16 num_placements)
425 {
426         struct xe_hw_engine *hwe;
427         enum xe_hw_engine_id id;
428         u32 logical_mask = 0;
429
430         if (XE_IOCTL_ERR(xe, width != 1))
431                 return 0;
432         if (XE_IOCTL_ERR(xe, num_placements != 1))
433                 return 0;
434         if (XE_IOCTL_ERR(xe, eci[0].engine_instance != 0))
435                 return 0;
436
437         eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;
438
439         for_each_hw_engine(hwe, gt, id) {
440                 if (xe_hw_engine_is_reserved(hwe))
441                         continue;
442
443                 if (hwe->class ==
444                     user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
445                         logical_mask |= BIT(hwe->logical_instance);
446         }
447
448         return logical_mask;
449 }
450
451 static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
452                                       struct drm_xe_engine_class_instance *eci,
453                                       u16 width, u16 num_placements)
454 {
455         int len = width * num_placements;
456         int i, j, n;
457         u16 class;
458         u16 gt_id;
459         u32 return_mask = 0, prev_mask;
460
461         if (XE_IOCTL_ERR(xe, !xe_device_guc_submission_enabled(xe) &&
462                          len > 1))
463                 return 0;
464
465         for (i = 0; i < width; ++i) {
466                 u32 current_mask = 0;
467
468                 for (j = 0; j < num_placements; ++j) {
469                         struct xe_hw_engine *hwe;
470
471                         n = j * width + i;
472
473                         hwe = find_hw_engine(xe, eci[n]);
474                         if (XE_IOCTL_ERR(xe, !hwe))
475                                 return 0;
476
477                         if (XE_IOCTL_ERR(xe, xe_hw_engine_is_reserved(hwe)))
478                                 return 0;
479
480                         if (XE_IOCTL_ERR(xe, n && eci[n].gt_id != gt_id) ||
481                             XE_IOCTL_ERR(xe, n && eci[n].engine_class != class))
482                                 return 0;
483
484                         class = eci[n].engine_class;
485                         gt_id = eci[n].gt_id;
486
487                         if (width == 1 || !i)
488                                 return_mask |= BIT(eci[n].engine_instance);
489                         current_mask |= BIT(eci[n].engine_instance);
490                 }
491
492                 /* Parallel submissions must be logically contiguous */
493                 if (i && XE_IOCTL_ERR(xe, current_mask != prev_mask << 1))
494                         return 0;
495
496                 prev_mask = current_mask;
497         }
498
499         return return_mask;
500 }
501
502 int xe_engine_create_ioctl(struct drm_device *dev, void *data,
503                            struct drm_file *file)
504 {
505         struct xe_device *xe = to_xe_device(dev);
506         struct xe_file *xef = to_xe_file(file);
507         struct drm_xe_engine_create *args = data;
508         struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
509         struct drm_xe_engine_class_instance __user *user_eci =
510                 u64_to_user_ptr(args->instances);
511         struct xe_hw_engine *hwe;
512         struct xe_vm *vm, *migrate_vm;
513         struct xe_gt *gt;
514         struct xe_engine *e = NULL;
515         u32 logical_mask;
516         u32 id;
517         int len;
518         int err;
519
520         if (XE_IOCTL_ERR(xe, args->flags))
521                 return -EINVAL;
522
523         len = args->width * args->num_placements;
524         if (XE_IOCTL_ERR(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
525                 return -EINVAL;
526
527         err = copy_from_user(eci, user_eci,
528                              sizeof(struct drm_xe_engine_class_instance) *
529                              len);
530         if (XE_IOCTL_ERR(xe, err))
531                 return -EFAULT;
532
533         if (XE_IOCTL_ERR(xe, eci[0].gt_id >= xe->info.tile_count))
534                 return -EINVAL;
535
536         xe_pm_runtime_get(xe);
537
538         if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
539                 for_each_gt(gt, xe, id) {
540                         struct xe_engine *new;
541
542                         if (xe_gt_is_media_type(gt))
543                                 continue;
544
545                         eci[0].gt_id = gt->info.id;
546                         logical_mask = bind_engine_logical_mask(xe, gt, eci,
547                                                                 args->width,
548                                                                 args->num_placements);
549                         if (XE_IOCTL_ERR(xe, !logical_mask)) {
550                                 err = -EINVAL;
551                                 goto put_rpm;
552                         }
553
554                         hwe = find_hw_engine(xe, eci[0]);
555                         if (XE_IOCTL_ERR(xe, !hwe)) {
556                                 err = -EINVAL;
557                                 goto put_rpm;
558                         }
559
560                         migrate_vm = xe_migrate_get_vm(gt->migrate);
561                         new = xe_engine_create(xe, migrate_vm, logical_mask,
562                                                args->width, hwe,
563                                                ENGINE_FLAG_PERSISTENT |
564                                                ENGINE_FLAG_VM |
565                                                (id ?
566                                                ENGINE_FLAG_BIND_ENGINE_CHILD :
567                                                0));
568                         xe_vm_put(migrate_vm);
569                         if (IS_ERR(new)) {
570                                 err = PTR_ERR(new);
571                                 if (e)
572                                         goto put_engine;
573                                 goto put_rpm;
574                         }
575                         if (id == 0)
576                                 e = new;
577                         else
578                                 list_add_tail(&new->multi_gt_list,
579                                               &e->multi_gt_link);
580                 }
581         } else {
582                 gt = xe_device_get_gt(xe, eci[0].gt_id);
583                 logical_mask = calc_validate_logical_mask(xe, gt, eci,
584                                                           args->width,
585                                                           args->num_placements);
586                 if (XE_IOCTL_ERR(xe, !logical_mask)) {
587                         err = -EINVAL;
588                         goto put_rpm;
589                 }
590
591                 hwe = find_hw_engine(xe, eci[0]);
592                 if (XE_IOCTL_ERR(xe, !hwe)) {
593                         err = -EINVAL;
594                         goto put_rpm;
595                 }
596
597                 vm = xe_vm_lookup(xef, args->vm_id);
598                 if (XE_IOCTL_ERR(xe, !vm)) {
599                         err = -ENOENT;
600                         goto put_rpm;
601                 }
602
603                 e = xe_engine_create(xe, vm, logical_mask,
604                                      args->width, hwe, ENGINE_FLAG_PERSISTENT);
605                 xe_vm_put(vm);
606                 if (IS_ERR(e)) {
607                         err = PTR_ERR(e);
608                         goto put_rpm;
609                 }
610         }
611
612         if (args->extensions) {
613                 err = engine_user_extensions(xe, e, args->extensions, 0, true);
614                 if (XE_IOCTL_ERR(xe, err))
615                         goto put_engine;
616         }
617
618         if (XE_IOCTL_ERR(xe, e->vm && xe_vm_in_compute_mode(e->vm) !=
619                          !!(e->flags & ENGINE_FLAG_COMPUTE_MODE))) {
620                 err = -EOPNOTSUPP;
621                 goto put_engine;
622         }
623
624         e->persistent.xef = xef;
625
626         mutex_lock(&xef->engine.lock);
627         err = xa_alloc(&xef->engine.xa, &id, e, xa_limit_32b, GFP_KERNEL);
628         mutex_unlock(&xef->engine.lock);
629         if (err)
630                 goto put_engine;
631
632         args->engine_id = id;
633
634         return 0;
635
636 put_engine:
637         xe_engine_kill(e);
638         xe_engine_put(e);
639 put_rpm:
640         xe_pm_runtime_put(xe);
641         return err;
642 }
643
644 int xe_engine_get_property_ioctl(struct drm_device *dev, void *data,
645                                  struct drm_file *file)
646 {
647         struct xe_device *xe = to_xe_device(dev);
648         struct xe_file *xef = to_xe_file(file);
649         struct drm_xe_engine_get_property *args = data;
650         struct xe_engine *e;
651
652         mutex_lock(&xef->engine.lock);
653         e = xa_load(&xef->engine.xa, args->engine_id);
654         mutex_unlock(&xef->engine.lock);
655
656         if (XE_IOCTL_ERR(xe, !e))
657                 return -ENOENT;
658
659         switch (args->property) {
660         case XE_ENGINE_GET_PROPERTY_BAN:
661                 args->value = !!(e->flags & ENGINE_FLAG_BANNED);
662                 break;
663         default:
664                 return -EINVAL;
665         }
666
667         return 0;
668 }
669
670 static void engine_kill_compute(struct xe_engine *e)
671 {
672         if (!xe_vm_in_compute_mode(e->vm))
673                 return;
674
675         down_write(&e->vm->lock);
676         list_del(&e->compute.link);
677         --e->vm->preempt.num_engines;
678         if (e->compute.pfence) {
679                 dma_fence_enable_sw_signaling(e->compute.pfence);
680                 dma_fence_put(e->compute.pfence);
681                 e->compute.pfence = NULL;
682         }
683         up_write(&e->vm->lock);
684 }
685
686 /**
687  * xe_engine_is_idle() - Whether an engine is idle.
688  * @engine: The engine
689  *
690  * FIXME: Need to determine what to use as the short-lived
691  * timeline lock for the engines, so that the return value
692  * of this function becomes more than just an advisory
693  * snapshot in time. The timeline lock must protect the
694  * seqno from racing submissions on the same engine.
695  * Typically vm->resv, but user-created timeline locks use the migrate vm
696  * and never grab the migrate vm->resv, so we have a race there.
697  *
698  * Return: True if the engine is idle, false otherwise.
699  */
700 bool xe_engine_is_idle(struct xe_engine *engine)
701 {
702         if (XE_WARN_ON(xe_engine_is_parallel(engine)))
703                 return false;
704
705         return xe_lrc_seqno(&engine->lrc[0]) ==
706                 engine->lrc[0].fence_ctx.next_seqno - 1;
707 }
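/*
 * Informal restatement of xe_engine_is_idle() above, as a minimal sketch;
 * the local names are illustrative only.  fence_ctx.next_seqno is the seqno
 * the next emitted job would take, so the engine is considered idle once the
 * last completed seqno has caught up with the last emitted one:
 *
 *      completed = xe_lrc_seqno(&engine->lrc[0]);
 *      emitted   = engine->lrc[0].fence_ctx.next_seqno - 1;
 *      idle      = (completed == emitted);
 *
 * Without the timeline lock discussed in the FIXME above, this comparison is
 * only an advisory snapshot.
 */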
708
709 void xe_engine_kill(struct xe_engine *e)
710 {
711         struct xe_engine *engine = e, *next;
712
713         list_for_each_entry_safe(engine, next, &engine->multi_gt_list,
714                                  multi_gt_link) {
715                 e->ops->kill(engine);
716                 engine_kill_compute(engine);
717         }
718
719         e->ops->kill(e);
720         engine_kill_compute(e);
721 }
722
723 int xe_engine_destroy_ioctl(struct drm_device *dev, void *data,
724                             struct drm_file *file)
725 {
726         struct xe_device *xe = to_xe_device(dev);
727         struct xe_file *xef = to_xe_file(file);
728         struct drm_xe_engine_destroy *args = data;
729         struct xe_engine *e;
730
731         if (XE_IOCTL_ERR(xe, args->pad))
732                 return -EINVAL;
733
734         mutex_lock(&xef->engine.lock);
735         e = xa_erase(&xef->engine.xa, args->engine_id);
736         mutex_unlock(&xef->engine.lock);
737         if (XE_IOCTL_ERR(xe, !e))
738                 return -ENOENT;
739
740         if (!(e->flags & ENGINE_FLAG_PERSISTENT))
741                 xe_engine_kill(e);
742         else
743                 xe_device_add_persistent_engines(xe, e);
744
745         trace_xe_engine_close(e);
746         xe_engine_put(e);
747         xe_pm_runtime_put(xe);
748
749         return 0;
750 }
751
752 int xe_engine_set_property_ioctl(struct drm_device *dev, void *data,
753                                  struct drm_file *file)
754 {
755         struct xe_device *xe = to_xe_device(dev);
756         struct xe_file *xef = to_xe_file(file);
757         struct drm_xe_engine_set_property *args = data;
758         struct xe_engine *e;
759         int ret;
760         u32 idx;
761
762         e = xe_engine_lookup(xef, args->engine_id);
763         if (XE_IOCTL_ERR(xe, !e))
764                 return -ENOENT;
765
766         if (XE_IOCTL_ERR(xe, args->property >=
767                          ARRAY_SIZE(engine_set_property_funcs))) {
768                 ret = -EINVAL;
769                 goto out;
770         }
771
772         idx = array_index_nospec(args->property,
773                                  ARRAY_SIZE(engine_set_property_funcs));
774         ret = engine_set_property_funcs[idx](xe, e, args->value, false);
775         if (XE_IOCTL_ERR(xe, ret))
776                 goto out;
777
778         if (args->extensions)
779                 ret = engine_user_extensions(xe, e, args->extensions, 0,
780                                              false);
781 out:
782         xe_engine_put(e);
783
784         return ret;
785 }