/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */
#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>
#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	= 1,
	[AMDGPU_HW_IP_COMPUTE]	= 4,
	[AMDGPU_HW_IP_DMA]	= 2,
	[AMDGPU_HW_IP_UVD]	= 1,
	[AMDGPU_HW_IP_VCE]	= 1,
	[AMDGPU_HW_IP_UVD_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_DEC]	= 1,
	[AMDGPU_HW_IP_VCN_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_JPEG]	= 1,
};
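
/*
 * Validate a context priority value coming from userspace. Only the
 * priorities listed below are accepted; UNSET and any other value are
 * rejected so callers can fall back to a default.
 */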
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
	case AMDGPU_CTX_PRIORITY_UNSET:
		return false;
	}
}
static enum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
		pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_VERY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_NORMAL:
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	/* This should not happen as we already sanitized the userspace-provided
	 * priority; WARN if it does.
	 */
	default:
		WARN(1, "Invalid context priority %d\n", ctx_prio);
		return DRM_SCHED_PRIORITY_NORMAL;
	}
}
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      int32_t priority)
{
	if (!amdgpu_ctx_priority_is_valid(priority))
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}
static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}
static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
		return AMDGPU_RING_PRIO_1;
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}
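
/*
 * Resolve the effective hardware ring priority for @hw_ip from the context's
 * init/override priority. Falls back to AMDGPU_RING_PRIO_DEFAULT when no
 * scheduler is configured at the requested priority level.
 */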
static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	int32_t ctx_prio;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
		hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
		break;
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_VCN_ENC:
		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
		break;
	default:
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
		break;
	}

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}
/* Calculate the time spent on the hw */
static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
{
	struct drm_sched_fence *s_fence;

	if (!fence)
		return ns_to_ktime(0);

	/* When the fence is not even scheduled, it can't have spent time */
	s_fence = to_drm_sched_fence(fence);
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
		return ns_to_ktime(0);

	/* When it is still running, account how much was already spent */
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
		return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);

	return ktime_sub(s_fence->finished.timestamp,
			 s_fence->scheduled.timestamp);
}
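
/* Sum up, under the ring lock, the hw time of all fences tracked by an entity */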
static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
				      struct amdgpu_ctx_entity *centity)
{
	ktime_t res = ns_to_ktime(0);
	uint32_t i;

	spin_lock(&ctx->ring_lock);
	for (i = 0; i < amdgpu_sched_jobs; i++) {
		res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
	}
	spin_unlock(&ctx->ring_lock);
	return res;
}
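
/*
 * Lazily allocate the amdgpu_ctx_entity for @hw_ip/@ring and initialize its
 * drm_sched entity with the schedulers matching the context priority. Load
 * balancing is disabled for engines that keep state between dependent jobs.
 */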
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	struct amdgpu_device *adev = ctx->mgr->adev;
	struct amdgpu_ctx_entity *entity;
	enum drm_sched_priority drm_prio;
	unsigned int hw_prio, num_scheds;
	int32_t ctx_prio;
	int r;

	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	entity->hw_ip = hw_ip;
	entity->sequence = 1;
	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);

	if (!(adev)->xcp_mgr) {
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
	} else {
		struct amdgpu_fpriv *fpriv;

		fpriv = container_of(ctx->ctx_mgr, struct amdgpu_fpriv, ctx_mgr);
		r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
					     &num_scheds, &scheds);
		if (r)
			goto cleanup_entity;
	}

	/* disable load balance if the hw engine retains context among dependent jobs */
	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
	    hw_ip == AMDGPU_HW_IP_UVD) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	/* It's not an error if we fail to install the new entity */
	if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
		goto cleanup_entity;

	return 0;

cleanup_entity:
	drm_sched_entity_fini(&entity->entity);

error_free_entity:
	kfree(entity);

	return r;
}
static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev,
				      struct amdgpu_ctx_entity *entity)
{
	ktime_t res = ns_to_ktime(0);
	int i;

	if (!entity)
		return res;

	for (i = 0; i < amdgpu_sched_jobs; ++i) {
		res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
		dma_fence_put(entity->fences[i]);
	}

	amdgpu_xcp_release_sched(adev, entity);

	kfree(entity);
	return res;
}
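
/* Translate the current DPM forced performance level into a UAPI stable pstate */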
static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level current_level;

	current_level = amdgpu_dpm_get_performance_level(adev);

	switch (current_level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
		break;
	default:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
		break;
	}
	return 0;
}
static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
			   struct drm_file *filp, struct amdgpu_ctx *ctx)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	u32 current_stable_pstate;
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	kref_init(&ctx->refcount);
	ctx->mgr = mgr;
	spin_lock_init(&ctx->ring_lock);

	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->generation = amdgpu_vm_generation(mgr->adev, &fpriv->vm);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r)
		return r;

	if (mgr->adev->pm.stable_pstate_ctx)
		ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
	else
		ctx->stable_pstate = current_stable_pstate;

	ctx->ctx_mgr = &(fpriv->ctx_mgr);
	return 0;
}
static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level level;
	u32 current_stable_pstate;
	int r;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
		r = -EBUSY;
		goto done;
	}

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r || (stable_pstate == current_stable_pstate))
		goto done;

	switch (stable_pstate) {
	case AMDGPU_CTX_STABLE_PSTATE_NONE:
		level = AMD_DPM_FORCED_LEVEL_AUTO;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_PEAK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
		break;
	default:
		r = -EINVAL;
		goto done;
	}

	r = amdgpu_dpm_force_performance_level(adev, level);

	if (level == AMD_DPM_FORCED_LEVEL_AUTO)
		adev->pm.stable_pstate_ctx = NULL;
	else
		adev->pm.stable_pstate_ctx = ctx;
done:
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	return r;
}
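
/*
 * Final kref release: account the remaining entity runtimes to the manager,
 * restore the stable pstate recorded at context creation and free the context.
 */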
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_ctx_mgr *mgr = ctx->mgr;
	struct amdgpu_device *adev = mgr->adev;
	unsigned i, j, idx;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			ktime_t spend;

			spend = amdgpu_ctx_fini_entity(adev, ctx->entities[i][j]);
			atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
		}
	}

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
		drm_dev_exit(idx);
	}

	kfree(ctx);
}
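
/*
 * Look up (and create on first use) the scheduler entity for the given
 * hw_ip/instance/ring triple and return it in @entity.
 */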
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;
	struct drm_sched_entity *ctx_entity;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	ctx_entity = &ctx->entities[hw_ip][ring]->entity;
	r = drm_sched_entity_error(ctx_entity);
	if (r) {
		DRM_DEBUG("error entity %p\n", ctx_entity);
		return r;
	}

	*entity = ctx_entity;
	return 0;
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    int32_t priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(mgr, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
#define AMDGPU_RAS_COUNTE_DELAY_MS 3000
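
/*
 * Extended context state query: reports reset, VRAM-lost, guilty and
 * reset-in-progress flags, plus cached RAS error counts when RAS is enabled.
 */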
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->generation != amdgpu_vm_generation(adev, &fpriv->vm))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	if (amdgpu_in_reset(adev))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS;

	if (adev->ras_enabled && con) {
		/* Return the cached values in O(1),
		 * and schedule delayed work to cache
		 * new values.
		 */
		int ce_count, ue_count;

		ce_count = atomic_read(&con->ras_ce_count);
		ue_count = atomic_read(&con->ras_ue_count);

		if (ce_count != ctx->ras_counter_ce) {
			ctx->ras_counter_ce = ce_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		}

		if (ue_count != ctx->ras_counter_ue) {
			ctx->ras_counter_ue = ue_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		}

		schedule_delayed_work(&con->ras_counte_delay_work,
				      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
	}

	mutex_unlock(&mgr->lock);
	return 0;
}
static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
				    struct amdgpu_fpriv *fpriv, uint32_t id,
				    bool set, u32 *stable_pstate)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	int r;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	if (set)
		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
	else
		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);

	mutex_unlock(&mgr->lock);
	return r;
}
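
/* DRM_AMDGPU_CTX ioctl entry point: alloc/free/query contexts, get/set the stable pstate */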
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id, stable_pstate;
	int32_t priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	priority = args->in.priority;

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (!amdgpu_ctx_priority_is_valid(priority))
		priority = AMDGPU_CTX_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
		if (!r)
			args->out.pstate.flags = stable_pstate;
		break;
	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
			return -EINVAL;
		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
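
/*
 * Store @fence in the entity's fence ring and return the sequence number
 * assigned to it. The slot being replaced is expected to be signaled already;
 * its hw time is accounted to the context manager before it is dropped.
 */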
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			      struct drm_sched_entity *entity,
			      struct dma_fence *fence)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	WARN_ON(other && !dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
		     &ctx->mgr->time_spend[centity->hw_ip]);

	dma_fence_put(other);
	return seq;
}
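
/*
 * Look up a fence by sequence number: ~0ull selects the most recent one,
 * sequences that were never emitted return -EINVAL and sequences that have
 * already left the fence ring return NULL.
 */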
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   int32_t priority)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity,
				      amdgpu_ctx_to_drm_sched_prio(priority));

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  int32_t priority)
{
	int32_t ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
			 struct amdgpu_device *adev)
{
	unsigned int i;

	mgr->adev = adev;
	mutex_init(&mgr->lock);
	idr_init_base(&mgr->ctx_handles, 1);

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		atomic64_set(&mgr->time_spend[i], 0);
}
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}
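
/*
 * Accumulate, per hw_ip, the hw time consumed through this manager: the
 * already retired time from time_spend[] plus the time of fences still
 * tracked by the live contexts.
 */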
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
			  ktime_t usage[AMDGPU_HW_IP_NUM])
{
	struct amdgpu_ctx *ctx;
	unsigned int hw_ip, i;
	uint32_t id;

	/*
	 * This is a bit racy because a ctx or a fence can be destroyed just as
	 * we try to account for them. But that is ok since exactly that case is
	 * explicitly allowed by the interface.
	 */
	mutex_lock(&mgr->lock);
	for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
		uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);

		usage[hw_ip] = ns_to_ktime(ns);
	}

	idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
		for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
			for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
				struct amdgpu_ctx_entity *centity;
				ktime_t spend;

				centity = ctx->entities[hw_ip][i];
				if (!centity)
					continue;
				spend = amdgpu_ctx_entity_time(ctx, centity);
				usage[hw_ip] = ktime_add(usage[hw_ip], spend);
			}
		}
	}
	mutex_unlock(&mgr->lock);
}