2 * Copyright 2014 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
27 #include "amdgpu_gfx.h"
28 #include "amdgpu_rlc.h"
29 #include "amdgpu_ras.h"
31 /* delay 0.1 second to enable gfx off feature */
32 #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
35 * GPU GFX IP block helper functions.
38 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
43 bit += mec * adev->gfx.mec.num_pipe_per_mec
44 * adev->gfx.mec.num_queue_per_pipe;
45 bit += pipe * adev->gfx.mec.num_queue_per_pipe;
51 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
52 int *mec, int *pipe, int *queue)
54 *queue = bit % adev->gfx.mec.num_queue_per_pipe;
55 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
56 % adev->gfx.mec.num_pipe_per_mec;
57 *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
58 / adev->gfx.mec.num_pipe_per_mec;
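/* Illustrative sketch (not driver code): the flat queue bit and its inverse form
 * a simple mixed-radix encoding. Assuming a typical configuration of 4 pipes per
 * MEC and 8 queues per pipe (both values are hardware-dependent):
 *
 *   bit   = mec * 4 * 8 + pipe * 8 + queue;   // mec=1, pipe=2, queue=3 -> 51
 *   queue = 51 % 8;                            // 3
 *   pipe  = (51 / 8) % 4;                      // 2
 *   mec   = (51 / 8) / 4;                      // 1
 *
 * so amdgpu_queue_mask_bit_to_mec_queue() exactly inverts
 * amdgpu_gfx_mec_queue_to_bit().
 */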
62 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
63 int mec, int pipe, int queue)
65 return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
66 adev->gfx.mec.queue_bitmap);
69 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
70 int me, int pipe, int queue)
74 bit += me * adev->gfx.me.num_pipe_per_me
75 * adev->gfx.me.num_queue_per_pipe;
76 bit += pipe * adev->gfx.me.num_queue_per_pipe;
82 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
83 int *me, int *pipe, int *queue)
85 *queue = bit % adev->gfx.me.num_queue_per_pipe;
86 *pipe = (bit / adev->gfx.me.num_queue_per_pipe)
87 % adev->gfx.me.num_pipe_per_me;
88 *me = (bit / adev->gfx.me.num_queue_per_pipe)
89 / adev->gfx.me.num_pipe_per_me;
92 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
93 int me, int pipe, int queue)
95 return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
96 adev->gfx.me.queue_bitmap);
100 * amdgpu_gfx_scratch_get - Allocate a scratch register
102 * @adev: amdgpu_device pointer
103 * @reg: scratch register mmio offset
105 * Allocate a CP scratch register for use by the driver (all asics).
106 * Returns 0 on success or -EINVAL on failure.
108 int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
112 i = ffs(adev->gfx.scratch.free_mask);
113 if (i != 0 && i <= adev->gfx.scratch.num_reg) {
114 i--;
115 adev->gfx.scratch.free_mask &= ~(1u << i);
116 *reg = adev->gfx.scratch.reg_base + i;
123 * amdgpu_gfx_scratch_free - Free a scratch register
125 * @adev: amdgpu_device pointer
126 * @reg: scratch register mmio offset
128 * Free a CP scratch register allocated for use by the driver (all asics)
130 void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
132 adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
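/* Usage sketch (illustrative only, assuming a caller inside this driver): a
 * scratch register is typically allocated, programmed, polled and then returned
 * to the free mask, e.g. in a ring test:
 *
 *   uint32_t scratch;
 *   int r = amdgpu_gfx_scratch_get(adev, &scratch);
 *   if (r)
 *           return r;
 *   WREG32(scratch, 0xCAFEDEAD);      // write a marker value via MMIO
 *   ...                               // let the CP overwrite it, then poll
 *   amdgpu_gfx_scratch_free(adev, scratch);
 */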
136 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
138 * @mask: array in which the per-shader array disable masks will be stored
139 * @max_se: number of SEs
140 * @max_sh: number of SHs
142 * The bitmask of CUs to be disabled in the shader array determined by se and
143 * sh is stored in mask[se * max_sh + sh].
145 void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
150 memset(mask, 0, sizeof(*mask) * max_se * max_sh);
152 if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
155 p = amdgpu_disable_cu;
158 int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
160 DRM_ERROR("amdgpu: could not parse disable_cu\n");
164 if (se < max_se && sh < max_sh && cu < 16) {
165 DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
166 mask[se * max_sh + sh] |= 1u << cu;
168 DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
172 next = strchr(p, ',');
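/* Example (illustrative): the module parameter is a comma-separated list of
 * "se.sh.cu" triples. Booting with amdgpu.disable_cu=1.0.3,1.0.4 would set
 * bits 3 and 4 in mask[1 * max_sh + 0], i.e. disable CUs 3 and 4 of
 * SE 1 / SH 0 (the actual max_sh value is ASIC-dependent).
 */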
179 static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
181 if (amdgpu_compute_multipipe != -1) {
182 DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
183 amdgpu_compute_multipipe);
184 return amdgpu_compute_multipipe == 1;
187 /* FIXME: spreading the queues across pipes causes perf regressions
188 * on POLARIS11 compute workloads */
189 if (adev->asic_type == CHIP_POLARIS11)
192 return adev->gfx.mec.num_mec > 1;
195 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
198 bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
200 /* Policy: alternate between normal and high priority */
201 cond = multipipe_policy ? pipe : queue;
203 return ((cond % 2) != 0);
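/* Illustrative consequence of the policy above: with the multipipe policy in
 * effect, queues on odd-numbered pipes (pipe 1, 3, ...) are treated as high
 * priority; without it, odd-numbered queues within a pipe (queue 1, 3, ...)
 * are. For example, pipe=1/queue=0 is high priority only under the multipipe
 * policy, while pipe=0/queue=1 is high priority only without it.
 */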
207 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
210 bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
211 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
212 adev->gfx.mec.num_queue_per_pipe,
213 adev->gfx.num_compute_rings);
215 if (multipipe_policy) {
216 /* policy: spread queues evenly across all pipes on MEC1 only */
217 for (i = 0; i < max_queues_per_mec; i++) {
218 pipe = i % adev->gfx.mec.num_pipe_per_mec;
219 queue = (i / adev->gfx.mec.num_pipe_per_mec) %
220 adev->gfx.mec.num_queue_per_pipe;
222 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
223 adev->gfx.mec.queue_bitmap);
226 /* policy: amdgpu owns all queues in the given pipe */
227 for (i = 0; i < max_queues_per_mec; ++i)
228 set_bit(i, adev->gfx.mec.queue_bitmap);
231 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
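/* Worked example (illustrative, assuming 4 pipes per MEC, 8 queues per pipe and
 * 8 compute rings): with the multipipe policy the loop above sets bits
 * {0, 8, 16, 24, 1, 9, 17, 25}, i.e. queues 0 and 1 on each of the four MEC1
 * pipes; without it, bits {0..7} are set, i.e. all 8 queues of MEC1 pipe 0.
 */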
234 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
238 for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
239 queue = i % adev->gfx.me.num_queue_per_pipe;
240 me = (i / adev->gfx.me.num_queue_per_pipe)
241 / adev->gfx.me.num_pipe_per_me;
243 if (me >= adev->gfx.me.num_me)
245 /* policy: amdgpu owns the first queue per pipe at this stage;
246 * will extend to multiple queues per pipe later */
247 if (me == 0 && queue < 1)
248 set_bit(i, adev->gfx.me.queue_bitmap);
251 /* update the number of active graphics rings */
252 adev->gfx.num_gfx_rings =
253 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
256 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
257 struct amdgpu_ring *ring)
260 int mec, pipe, queue;
262 queue_bit = adev->gfx.mec.num_mec
263 * adev->gfx.mec.num_pipe_per_mec
264 * adev->gfx.mec.num_queue_per_pipe;
266 while (queue_bit-- >= 0) {
267 if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
270 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
273 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
274 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
275 * can only be issued on queue 0.
277 if ((mec == 1 && pipe > 1) || queue != 0)
287 dev_err(adev->dev, "Failed to find a queue for KIQ\n");
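/* Illustrative outcome (assuming two MECs, 4 pipes per MEC, 8 queues per pipe,
 * with the driver's compute queues confined to MEC1): scanning down from the
 * highest bit, the first slot that is not in queue_bitmap, uses queue 0 and
 * avoids pipes 2/3 of MEC2 is MEC2 / pipe 1 / queue 0, i.e. bit
 * 1*32 + 1*8 + 0 = 40, so the KIQ usually lands there.
 */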
291 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
292 struct amdgpu_ring *ring,
293 struct amdgpu_irq_src *irq)
295 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
298 spin_lock_init(&kiq->ring_lock);
301 ring->ring_obj = NULL;
302 ring->use_doorbell = true;
303 ring->doorbell_index = adev->doorbell_index.kiq;
305 r = amdgpu_gfx_kiq_acquire(adev, ring);
309 ring->eop_gpu_addr = kiq->eop_gpu_addr;
310 ring->no_scheduler = true;
311 sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
312 r = amdgpu_ring_init(adev, ring, 1024,
313 irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
314 AMDGPU_RING_PRIO_DEFAULT);
316 dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
321 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
323 amdgpu_ring_fini(ring);
326 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
328 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
330 amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
333 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
338 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
340 r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
341 AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
342 &kiq->eop_gpu_addr, (void **)&hpd);
344 dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
348 memset(hpd, 0, hpd_size);
350 r = amdgpu_bo_reserve(kiq->eop_obj, true);
351 if (unlikely(r != 0))
352 dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
353 amdgpu_bo_kunmap(kiq->eop_obj);
354 amdgpu_bo_unreserve(kiq->eop_obj);
359 /* create MQD for each compute/gfx queue */
360 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
363 struct amdgpu_ring *ring = NULL;
366 /* create MQD for KIQ */
367 ring = &adev->gfx.kiq.ring;
368 if (!ring->mqd_obj) {
369 /* Originally the KIQ MQD was placed in the GTT domain, but for SR-IOV the VRAM
370 * domain is a must: otherwise the hypervisor's SAVE_VF fails after the driver
371 * unloads, since the MQD has been freed and unbound from the GART. To avoid
372 * that divergence, use the VRAM domain for the KIQ MQD on both SR-IOV and bare-metal
374 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
375 AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
376 &ring->mqd_gpu_addr, &ring->mqd_ptr);
378 dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
382 /* prepare MQD backup */
383 adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
384 if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
385 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
388 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
389 /* create MQD for each KGQ */
390 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
391 ring = &adev->gfx.gfx_ring[i];
392 if (!ring->mqd_obj) {
393 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
394 AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
395 &ring->mqd_gpu_addr, &ring->mqd_ptr);
397 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
401 /* prepare MQD backup */
402 adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
403 if (!adev->gfx.me.mqd_backup[i])
404 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
409 /* create MQD for each KCQ */
410 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
411 ring = &adev->gfx.compute_ring[i];
412 if (!ring->mqd_obj) {
413 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
414 AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
415 &ring->mqd_gpu_addr, &ring->mqd_ptr);
417 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
421 /* prepare MQD backup */
422 adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
423 if (!adev->gfx.mec.mqd_backup[i])
424 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
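/* Note on the backup indexing above (as implied by the code): compute ring i
 * keeps its MQD copy in adev->gfx.mec.mqd_backup[i], gfx ring i in
 * adev->gfx.me.mqd_backup[i], and the KIQ uses the extra slot at index
 * AMDGPU_MAX_COMPUTE_RINGS so it never collides with a KCQ backup.
 */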
431 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
433 struct amdgpu_ring *ring = NULL;
436 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
437 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
438 ring = &adev->gfx.gfx_ring[i];
439 kfree(adev->gfx.me.mqd_backup[i]);
440 amdgpu_bo_free_kernel(&ring->mqd_obj,
446 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
447 ring = &adev->gfx.compute_ring[i];
448 kfree(adev->gfx.mec.mqd_backup[i]);
449 amdgpu_bo_free_kernel(&ring->mqd_obj,
454 ring = &adev->gfx.kiq.ring;
455 kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
456 amdgpu_bo_free_kernel(&ring->mqd_obj,
461 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
463 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
464 struct amdgpu_ring *kiq_ring = &kiq->ring;
467 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
470 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
471 adev->gfx.num_compute_rings))
474 for (i = 0; i < adev->gfx.num_compute_rings; i++)
475 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
478 return amdgpu_ring_test_helper(kiq_ring);
481 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
484 int mec, pipe, queue;
485 int set_resource_bit = 0;
487 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
489 set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
491 return set_resource_bit;
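/* Worked example (illustrative): the queue mask handed to kiq_set_resources()
 * uses fixed strides of 4 pipes and 8 queues per MEC regardless of the actual
 * hardware configuration, hence the hard-coded constants above. For mec=1,
 * pipe=2, queue=5 this yields 1*4*8 + 2*8 + 5 = 53.
 */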
494 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
496 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
497 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
498 uint64_t queue_mask = 0;
501 if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
504 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
505 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
508 /* This situation may be hit in the future if a new HW
509 * generation exposes more than 64 queues. If so, the
510 * definition of queue_mask needs updating */
511 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
512 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
516 queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
519 DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
522 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
523 adev->gfx.num_compute_rings +
524 kiq->pmf->set_resources_size);
526 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
530 kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
531 for (i = 0; i < adev->gfx.num_compute_rings; i++)
532 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
534 r = amdgpu_ring_test_helper(kiq_ring);
536 DRM_ERROR("KCQ enable failed\n");
541 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
543 * @adev: amdgpu_device pointer
544 * @enable: true to enable the gfx off feature, false to disable it
546 * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
547 * 2. Other clients can send a request to disable the gfx off feature; the request should be honored.
548 * 3. Other clients can cancel their request to disable the gfx off feature.
549 * 4. Other clients should not request enabling the gfx off feature before disabling it.
552 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
554 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
557 mutex_lock(&adev->gfx.gfx_off_mutex);
560 adev->gfx.gfx_off_req_count++;
561 else if (adev->gfx.gfx_off_req_count > 0)
562 adev->gfx.gfx_off_req_count--;
564 if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
565 schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
566 } else if (!enable && adev->gfx.gfx_off_state) {
567 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
568 adev->gfx.gfx_off_state = false;
570 if (adev->gfx.funcs->init_spm_golden) {
571 dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
572 amdgpu_gfx_init_spm_golden(adev);
577 mutex_unlock(&adev->gfx.gfx_off_mutex);
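/* Usage sketch (illustrative only): callers that must touch GFX registers while
 * GFXOFF may be armed bracket the access with a disable/enable pair, relying on
 * the request counting above:
 *
 *   amdgpu_gfx_off_ctrl(adev, false);   // bump gfx_off_req_count, keep GFX on
 *   ...                                 // safe to access GFX registers here
 *   amdgpu_gfx_off_ctrl(adev, true);    // drop the request; GFXOFF is
 *                                       // re-armed after the 100 ms delay
 */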
580 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
585 mutex_lock(&adev->gfx.gfx_off_mutex);
587 r = smu_get_status_gfxoff(adev, value);
589 mutex_unlock(&adev->gfx.gfx_off_mutex);
594 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
597 struct ras_fs_if fs_info = {
598 .sysfs_name = "gfx_err_count",
600 struct ras_ih_if ih_info = {
601 .cb = amdgpu_gfx_process_ras_data_cb,
604 if (!adev->gfx.ras_if) {
605 adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
606 if (!adev->gfx.ras_if)
608 adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
609 adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
610 adev->gfx.ras_if->sub_block_index = 0;
611 strcpy(adev->gfx.ras_if->name, "gfx");
613 fs_info.head = ih_info.head = *adev->gfx.ras_if;
615 r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
620 if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
621 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
625 /* free gfx ras_if if ras is not supported */
632 amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
634 kfree(adev->gfx.ras_if);
635 adev->gfx.ras_if = NULL;
639 void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
641 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
643 struct ras_common_if *ras_if = adev->gfx.ras_if;
644 struct ras_ih_if ih_info = {
646 .cb = amdgpu_gfx_process_ras_data_cb,
649 amdgpu_ras_late_fini(adev, ras_if, &ih_info);
654 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
656 struct amdgpu_iv_entry *entry)
658 /* TODO: a UE (uncorrectable error) will trigger an interrupt.
660 * When "Full RAS" is enabled, the per-IP interrupt sources should
661 * be disabled and the driver should only look for the aggregated
662 * interrupt via sync flood.
664 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
665 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
666 if (adev->gfx.funcs->query_ras_error_count)
667 adev->gfx.funcs->query_ras_error_count(adev, err_data);
668 amdgpu_ras_reset_gpu(adev);
670 return AMDGPU_RAS_SUCCESS;
673 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
674 struct amdgpu_irq_src *source,
675 struct amdgpu_iv_entry *entry)
677 struct ras_common_if *ras_if = adev->gfx.ras_if;
678 struct ras_dispatch_if ih_data = {
685 ih_data.head = *ras_if;
687 DRM_ERROR("CP ECC ERROR IRQ\n");
688 amdgpu_ras_interrupt_dispatch(adev, &ih_data);
692 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
694 signed long r, cnt = 0;
696 uint32_t seq, reg_val_offs = 0, value = 0;
697 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
698 struct amdgpu_ring *ring = &kiq->ring;
700 if (adev->in_pci_err_recovery)
703 BUG_ON(!ring->funcs->emit_rreg);
705 spin_lock_irqsave(&kiq->ring_lock, flags);
706 if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
707 pr_err("critical bug! too many kiq readers\n");
710 amdgpu_ring_alloc(ring, 32);
711 amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
712 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
716 amdgpu_ring_commit(ring);
717 spin_unlock_irqrestore(&kiq->ring_lock, flags);
719 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
721 /* don't wait anymore for the gpu reset case, because waiting here may
722 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
723 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
724 * never return if we keep waiting in virt_kiq_rreg, which causes
725 * gpu_recover() to hang there.
727 * also don't wait anymore in IRQ context
729 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
730 goto failed_kiq_read;
733 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
734 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
735 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
738 if (cnt > MAX_KIQ_REG_TRY)
739 goto failed_kiq_read;
742 value = adev->wb.wb[reg_val_offs];
743 amdgpu_device_wb_free(adev, reg_val_offs);
747 amdgpu_ring_undo(ring);
749 spin_unlock_irqrestore(&kiq->ring_lock, flags);
752 amdgpu_device_wb_free(adev, reg_val_offs);
753 dev_err(adev->dev, "failed to read reg:%x\n", reg);
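/* Usage sketch (illustrative, assuming an SR-IOV guest context): registers the
 * VF cannot access directly over MMIO are read through the KIQ instead, roughly
 * as the register accessors elsewhere in the driver do:
 *
 *   if (amdgpu_sriov_runtime(adev))
 *           value = amdgpu_kiq_rreg(adev, reg);
 *   else
 *           value = RREG32_NO_KIQ(reg);
 */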
757 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
759 signed long r, cnt = 0;
762 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
763 struct amdgpu_ring *ring = &kiq->ring;
765 BUG_ON(!ring->funcs->emit_wreg);
767 if (adev->in_pci_err_recovery)
770 spin_lock_irqsave(&kiq->ring_lock, flags);
771 amdgpu_ring_alloc(ring, 32);
772 amdgpu_ring_emit_wreg(ring, reg, v);
773 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
777 amdgpu_ring_commit(ring);
778 spin_unlock_irqrestore(&kiq->ring_lock, flags);
780 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
782 /* don't wait anymore for the gpu reset case, because waiting here may
783 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
784 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
785 * never return if we keep waiting in virt_kiq_rreg, which causes
786 * gpu_recover() to hang there.
788 * also don't wait anymore in IRQ context
790 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
791 goto failed_kiq_write;
794 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
796 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
797 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
800 if (cnt > MAX_KIQ_REG_TRY)
801 goto failed_kiq_write;
806 amdgpu_ring_undo(ring);
807 spin_unlock_irqrestore(&kiq->ring_lock, flags);
809 dev_err(adev->dev, "failed to write reg:%x\n", reg);
812 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
814 if (amdgpu_num_kcq == -1) {
816 } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
817 dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
820 return amdgpu_num_kcq;
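/* Example (illustrative): as the checks above indicate, any amdgpu.num_kcq value
 * outside 0..8 falls back to 8 kernel compute queues, the -1 default likewise
 * selects 8, while e.g. amdgpu.num_kcq=2 limits the driver to two KCQs.
 */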
823 /* amdgpu_gfx_state_change_set - Handle gfx power state change set
824 * @adev: amdgpu_device pointer
825 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
829 void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
831 if (is_support_sw_smu(adev)) {
832 smu_gfx_state_change_set(&adev->smu, state);
834 mutex_lock(&adev->pm.mutex);
835 if (adev->powerplay.pp_funcs &&
836 adev->powerplay.pp_funcs->gfx_state_change_set)
837 adev->powerplay.pp_funcs->gfx_state_change_set(
838 adev->powerplay.pp_handle, state);
839 mutex_unlock(&adev->pm.mutex);