2 * Copyright 2014 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/firmware.h>
28 #include "amdgpu_gfx.h"
29 #include "amdgpu_rlc.h"
30 #include "amdgpu_ras.h"
31 #include "amdgpu_xcp.h"
32 #include "amdgpu_xgmi.h"
34 /* delay 0.1 second to enable gfx off feature */
35 #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
37 #define GFX_OFF_NO_DELAY 0
40 * GPU GFX IP block helper functions.
43 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
48 bit += mec * adev->gfx.mec.num_pipe_per_mec
49 * adev->gfx.mec.num_queue_per_pipe;
50 bit += pipe * adev->gfx.mec.num_queue_per_pipe;
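/* The flat queue index is therefore:
 *   bit = (mec * num_pipe_per_mec + pipe) * num_queue_per_pipe + queue
 * For example, with 4 pipes per MEC and 8 queues per pipe (illustrative
 * values), MEC 1 / pipe 2 / queue 3 maps to bit (1 * 4 + 2) * 8 + 3 = 51.
 */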
56 void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
57 int *mec, int *pipe, int *queue)
59 *queue = bit % adev->gfx.mec.num_queue_per_pipe;
60 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
61 % adev->gfx.mec.num_pipe_per_mec;
62 *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
63 / adev->gfx.mec.num_pipe_per_mec;
67 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
68 int xcc_id, int mec, int pipe, int queue)
70 return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
71 adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
74 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
75 int me, int pipe, int queue)
79 bit += me * adev->gfx.me.num_pipe_per_me
80 * adev->gfx.me.num_queue_per_pipe;
81 bit += pipe * adev->gfx.me.num_queue_per_pipe;
87 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
88 int *me, int *pipe, int *queue)
90 *queue = bit % adev->gfx.me.num_queue_per_pipe;
91 *pipe = (bit / adev->gfx.me.num_queue_per_pipe)
92 % adev->gfx.me.num_pipe_per_me;
93 *me = (bit / adev->gfx.me.num_queue_per_pipe)
94 / adev->gfx.me.num_pipe_per_me;
97 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
98 int me, int pipe, int queue)
100 return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
101 adev->gfx.me.queue_bitmap);
105 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
107 * @mask: array in which the per-shader array disable masks will be stored
108 * @max_se: number of SEs
109 * @max_sh: number of SHs
111 * The bitmask of CUs to be disabled in the shader array determined by se and
112 * sh is stored in mask[se * max_sh + sh].
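 * For example (illustrative values), amdgpu.disable_cu=0.0.4,1.0.7 disables
 * CU 4 in SE 0 / SH 0 and CU 7 in SE 1 / SH 0.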
114 void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
116 unsigned int se, sh, cu;
119 memset(mask, 0, sizeof(*mask) * max_se * max_sh);
121 if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
124 p = amdgpu_disable_cu;
127 int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
130 DRM_ERROR("amdgpu: could not parse disable_cu\n");
134 if (se < max_se && sh < max_sh && cu < 16) {
135 DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
136 mask[se * max_sh + sh] |= 1u << cu;
138 DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
142 next = strchr(p, ',');
149 static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
151 return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
154 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
156 if (amdgpu_compute_multipipe != -1) {
157 DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
158 amdgpu_compute_multipipe);
159 return amdgpu_compute_multipipe == 1;
162 if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
165 /* FIXME: spreading the queues across pipes causes perf regressions
166 * on POLARIS11 compute workloads */
167 if (adev->asic_type == CHIP_POLARIS11)
170 return adev->gfx.mec.num_mec > 1;
173 bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
174 struct amdgpu_ring *ring)
176 int queue = ring->queue;
177 int pipe = ring->pipe;
179 /* Policy: use pipe1 queue0 as high priority graphics queue if we
180 * have more than one gfx pipe.
182 if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
183 adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
187 bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
188 if (ring == &adev->gfx.gfx_ring[bit])
195 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
196 struct amdgpu_ring *ring)
198 /* Policy: use 1st queue as high priority compute queue if we
199 * have more than one compute queue.
201 if (adev->gfx.num_compute_rings > 1 &&
202 ring == &adev->gfx.compute_ring[0])
208 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
210 int i, j, queue, pipe;
211 bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
212 int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
213 adev->gfx.mec.num_queue_per_pipe,
214 adev->gfx.num_compute_rings);
215 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
217 if (multipipe_policy) {
218 /* policy: spread queues evenly across all pipes on MEC1 only;
219 * for multiple XCCs, just apply the same policy per XCC for simplicity */
220 for (j = 0; j < num_xcc; j++) {
221 for (i = 0; i < max_queues_per_mec; i++) {
222 pipe = i % adev->gfx.mec.num_pipe_per_mec;
223 queue = (i / adev->gfx.mec.num_pipe_per_mec) %
224 adev->gfx.mec.num_queue_per_pipe;
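/* i iterates pipes first, then queues: e.g. with 4 pipes per MEC
 * (illustrative), rings 0-3 land on queue 0 of pipes 0-3, ring 4 on
 * pipe 0 / queue 1, and so on.
 */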
226 set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
227 adev->gfx.mec_bitmap[j].queue_bitmap);
231 /* policy: amdgpu owns all queues in the given pipe */
232 for (j = 0; j < num_xcc; j++) {
233 for (i = 0; i < max_queues_per_mec; ++i)
234 set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
238 for (j = 0; j < num_xcc; j++) {
239 dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
240 bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
244 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
247 bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
248 int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
249 adev->gfx.me.num_queue_per_pipe;
251 if (multipipe_policy) {
252 /* policy: amdgpu owns the first queue per pipe at this stage;
253 * this will be extended to multiple queues per pipe later */
254 for (i = 0; i < max_queues_per_me; i++) {
255 pipe = i % adev->gfx.me.num_pipe_per_me;
256 queue = (i / adev->gfx.me.num_pipe_per_me) %
257 adev->gfx.me.num_queue_per_pipe;
259 set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
260 adev->gfx.me.queue_bitmap);
263 for (i = 0; i < max_queues_per_me; ++i)
264 set_bit(i, adev->gfx.me.queue_bitmap);
267 /* update the number of active graphics rings */
268 adev->gfx.num_gfx_rings =
269 bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
272 static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
273 struct amdgpu_ring *ring, int xcc_id)
276 int mec, pipe, queue;
278 queue_bit = adev->gfx.mec.num_mec
279 * adev->gfx.mec.num_pipe_per_mec
280 * adev->gfx.mec.num_queue_per_pipe;
282 while (--queue_bit >= 0) {
283 if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
286 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
289 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
290 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
291 * can only be issued on queue 0.
293 if ((mec == 1 && pipe > 1) || queue != 0)
303 dev_err(adev->dev, "Failed to find a queue for KIQ\n");
307 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
308 struct amdgpu_ring *ring,
309 struct amdgpu_irq_src *irq, int xcc_id)
311 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
314 spin_lock_init(&kiq->ring_lock);
317 ring->ring_obj = NULL;
318 ring->use_doorbell = true;
319 ring->xcc_id = xcc_id;
320 ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
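/* Each XCC gets its own doorbell range, so the KIQ doorbell for this
 * instance is offset by xcc_id * xcc_doorbell_range (see below).
 */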
321 ring->doorbell_index =
322 (adev->doorbell_index.kiq +
323 xcc_id * adev->doorbell_index.xcc_doorbell_range)
326 r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
330 ring->eop_gpu_addr = kiq->eop_gpu_addr;
331 ring->no_scheduler = true;
332 sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue);
333 r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
334 AMDGPU_RING_PRIO_DEFAULT, NULL);
336 dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
341 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
343 amdgpu_ring_fini(ring);
346 void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
348 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
350 amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
353 int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
354 unsigned int hpd_size, int xcc_id)
358 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
360 r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
361 AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
362 &kiq->eop_gpu_addr, (void **)&hpd);
364 dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
368 memset(hpd, 0, hpd_size);
370 r = amdgpu_bo_reserve(kiq->eop_obj, true);
371 if (unlikely(r != 0))
372 dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
373 amdgpu_bo_kunmap(kiq->eop_obj);
374 amdgpu_bo_unreserve(kiq->eop_obj);
379 /* create MQD for each compute/gfx queue */
380 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
381 unsigned int mqd_size, int xcc_id)
384 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
385 struct amdgpu_ring *ring = &kiq->ring;
386 u32 domain = AMDGPU_GEM_DOMAIN_GTT;
388 /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
389 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
390 domain |= AMDGPU_GEM_DOMAIN_VRAM;
392 /* create MQD for KIQ */
393 if (!adev->enable_mes_kiq && !ring->mqd_obj) {
394 /* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV the VRAM
395 * domain is a must; otherwise the hypervisor triggers a SAVE_VF failure after the
396 * driver is unloaded, since the MQD is deallocated on gart_unbind. To avoid this
397 * divergence, use the VRAM domain for the KIQ MQD on both SRIOV and bare metal.
399 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
400 AMDGPU_GEM_DOMAIN_VRAM |
401 AMDGPU_GEM_DOMAIN_GTT,
406 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
410 /* prepare MQD backup */
411 kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
412 if (!kiq->mqd_backup) {
414 "no memory to create MQD backup for ring %s\n", ring->name);
419 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
420 /* create MQD for each KGQ */
421 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
422 ring = &adev->gfx.gfx_ring[i];
423 if (!ring->mqd_obj) {
424 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
425 domain, &ring->mqd_obj,
426 &ring->mqd_gpu_addr, &ring->mqd_ptr);
428 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
432 ring->mqd_size = mqd_size;
433 /* prepare MQD backup */
434 adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
435 if (!adev->gfx.me.mqd_backup[i]) {
436 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
443 /* create MQD for each KCQ */
444 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
445 j = i + xcc_id * adev->gfx.num_compute_rings;
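/* compute rings are laid out contiguously per XCC, so offset the
 * ring index by xcc_id * num_compute_rings */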
446 ring = &adev->gfx.compute_ring[j];
447 if (!ring->mqd_obj) {
448 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
449 domain, &ring->mqd_obj,
450 &ring->mqd_gpu_addr, &ring->mqd_ptr);
452 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
456 ring->mqd_size = mqd_size;
457 /* prepare MQD backup */
458 adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
459 if (!adev->gfx.mec.mqd_backup[j]) {
460 dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
469 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
471 struct amdgpu_ring *ring = NULL;
473 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
475 if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
476 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
477 ring = &adev->gfx.gfx_ring[i];
478 kfree(adev->gfx.me.mqd_backup[i]);
479 amdgpu_bo_free_kernel(&ring->mqd_obj,
485 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
486 j = i + xcc_id * adev->gfx.num_compute_rings;
487 ring = &adev->gfx.compute_ring[j];
488 kfree(adev->gfx.mec.mqd_backup[j]);
489 amdgpu_bo_free_kernel(&ring->mqd_obj,
495 kfree(kiq->mqd_backup);
496 amdgpu_bo_free_kernel(&ring->mqd_obj,
501 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
503 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
504 struct amdgpu_ring *kiq_ring = &kiq->ring;
505 struct amdgpu_hive_info *hive;
506 struct amdgpu_ras *ras;
507 int hive_ras_recovery = 0;
511 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
514 spin_lock(&kiq->ring_lock);
515 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
516 adev->gfx.num_compute_rings)) {
517 spin_unlock(&kiq->ring_lock);
521 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
522 j = i + xcc_id * adev->gfx.num_compute_rings;
523 kiq->pmf->kiq_unmap_queues(kiq_ring,
524 &adev->gfx.compute_ring[j],
529 * This is a workaround: only skip the kiq_ring test
530 * during RAS recovery in the suspend stage for gfx 9.4.3
532 hive = amdgpu_get_xgmi_hive(adev);
534 hive_ras_recovery = atomic_read(&hive->ras_recovery);
535 amdgpu_put_xgmi_hive(hive);
538 ras = amdgpu_ras_get_context(adev);
539 if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
540 ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) {
541 spin_unlock(&kiq->ring_lock);
545 if (kiq_ring->sched.ready && !adev->job_hang)
546 r = amdgpu_ring_test_helper(kiq_ring);
547 spin_unlock(&kiq->ring_lock);
552 int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
554 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
555 struct amdgpu_ring *kiq_ring = &kiq->ring;
559 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
562 spin_lock(&kiq->ring_lock);
563 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
564 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
565 adev->gfx.num_gfx_rings)) {
566 spin_unlock(&kiq->ring_lock);
570 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
571 j = i + xcc_id * adev->gfx.num_gfx_rings;
572 kiq->pmf->kiq_unmap_queues(kiq_ring,
573 &adev->gfx.gfx_ring[j],
574 PREEMPT_QUEUES, 0, 0);
578 if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
579 r = amdgpu_ring_test_helper(kiq_ring);
580 spin_unlock(&kiq->ring_lock);
585 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
588 int mec, pipe, queue;
589 int set_resource_bit = 0;
591 amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
593 set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
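/* The SET_RESOURCES queue mask uses a fixed layout of 4 pipes per MEC
 * and 8 queues per pipe, hence the hard-coded multipliers above.
 */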
595 return set_resource_bit;
598 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
600 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
601 struct amdgpu_ring *kiq_ring = &kiq->ring;
602 uint64_t queue_mask = 0;
605 if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
608 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
609 if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
612 /* This situation may be hit in the future if a new HW
613 * generation exposes more than 64 queues. If so, the
614 * definition of queue_mask needs updating */
615 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
616 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
620 queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
623 DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
625 amdgpu_device_flush_hdp(adev, NULL);
627 spin_lock(&kiq->ring_lock);
628 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
629 adev->gfx.num_compute_rings +
630 kiq->pmf->set_resources_size);
632 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
633 spin_unlock(&kiq->ring_lock);
637 if (adev->enable_mes)
640 kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
641 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
642 j = i + xcc_id * adev->gfx.num_compute_rings;
643 kiq->pmf->kiq_map_queues(kiq_ring,
644 &adev->gfx.compute_ring[j]);
647 r = amdgpu_ring_test_helper(kiq_ring);
648 spin_unlock(&kiq->ring_lock);
650 DRM_ERROR("KCQ enable failed\n");
655 int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
657 struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
658 struct amdgpu_ring *kiq_ring = &kiq->ring;
661 if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
664 amdgpu_device_flush_hdp(adev, NULL);
666 spin_lock(&kiq->ring_lock);
667 /* No need to map kgq on the slave */
668 if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
669 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
670 adev->gfx.num_gfx_rings);
672 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
673 spin_unlock(&kiq->ring_lock);
677 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
678 j = i + xcc_id * adev->gfx.num_gfx_rings;
679 kiq->pmf->kiq_map_queues(kiq_ring,
680 &adev->gfx.gfx_ring[j]);
684 r = amdgpu_ring_test_helper(kiq_ring);
685 spin_unlock(&kiq->ring_lock);
687 DRM_ERROR("KCQ enable failed\n");
692 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
694 * @adev: amdgpu_device pointer
695 * @enable: true to enable the gfx off feature, false to disable it
697 * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is enabled.
698 * 2. Other clients can request that the gfx off feature be disabled; the request should be honored.
699 * 3. Other clients can cancel their request to disable the gfx off feature.
700 * 4. Other clients should not request enabling the gfx off feature before they have requested disabling it.
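 *
 * A typical usage pattern (illustrative) for a client that needs the GFX
 * block powered up is:
 *   amdgpu_gfx_off_ctrl(adev, false);   /* request: keep GFXOFF disabled */
 *   ...access GFX registers...
 *   amdgpu_gfx_off_ctrl(adev, true);    /* cancel the disable request */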
703 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
705 unsigned long delay = GFX_OFF_DELAY_ENABLE;
707 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
710 mutex_lock(&adev->gfx.gfx_off_mutex);
713 /* If the count is already 0, it means there's an imbalance bug somewhere.
714 * Note that the bug may be in a different caller than the one which triggers the WARN_ON_ONCE below.
717 if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
720 adev->gfx.gfx_off_req_count--;
722 if (adev->gfx.gfx_off_req_count == 0 &&
723 !adev->gfx.gfx_off_state) {
724 schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
728 if (adev->gfx.gfx_off_req_count == 0) {
729 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
731 if (adev->gfx.gfx_off_state &&
732 !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
733 adev->gfx.gfx_off_state = false;
735 if (adev->gfx.funcs->init_spm_golden) {
737 "GFXOFF is disabled, re-init SPM golden settings\n");
738 amdgpu_gfx_init_spm_golden(adev);
743 adev->gfx.gfx_off_req_count++;
747 mutex_unlock(&adev->gfx.gfx_off_mutex);
750 int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
754 mutex_lock(&adev->gfx.gfx_off_mutex);
756 r = amdgpu_dpm_set_residency_gfxoff(adev, value);
758 mutex_unlock(&adev->gfx.gfx_off_mutex);
763 int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
767 mutex_lock(&adev->gfx.gfx_off_mutex);
769 r = amdgpu_dpm_get_residency_gfxoff(adev, value);
771 mutex_unlock(&adev->gfx.gfx_off_mutex);
776 int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
780 mutex_lock(&adev->gfx.gfx_off_mutex);
782 r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
784 mutex_unlock(&adev->gfx.gfx_off_mutex);
789 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
794 mutex_lock(&adev->gfx.gfx_off_mutex);
796 r = amdgpu_dpm_get_status_gfxoff(adev, value);
798 mutex_unlock(&adev->gfx.gfx_off_mutex);
803 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
807 if (amdgpu_ras_is_supported(adev, ras_block->block)) {
808 if (!amdgpu_persistent_edc_harvesting_supported(adev))
809 amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
811 r = amdgpu_ras_block_late_init(adev, ras_block);
815 if (adev->gfx.cp_ecc_error_irq.funcs) {
816 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
821 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
826 amdgpu_ras_block_late_fini(adev, ras_block);
830 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
833 struct amdgpu_gfx_ras *ras = NULL;
835 /* If adev->gfx.ras is NULL, gfx does not support the
836 * RAS function, so do nothing here.
843 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
845 dev_err(adev->dev, "Failed to register gfx ras block!\n");
849 strcpy(ras->ras_block.ras_comm.name, "gfx");
850 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
851 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
852 adev->gfx.ras_if = &ras->ras_block.ras_comm;
854 /* If no IP-specific ras_late_init function is defined, use the gfx default ras_late_init */
855 if (!ras->ras_block.ras_late_init)
856 ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
858 /* If no IP-specific ras_cb function is defined, use the default ras_cb */
859 if (!ras->ras_block.ras_cb)
860 ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
865 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
866 struct amdgpu_iv_entry *entry)
868 if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
869 return adev->gfx.ras->poison_consumption_handler(adev, entry);
874 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
876 struct amdgpu_iv_entry *entry)
878 /* TODO: a UE (uncorrectable error) will trigger an interrupt.
880 * When "Full RAS" is enabled, the per-IP interrupt sources should
881 * be disabled and the driver should only look for the aggregated
882 * interrupt via sync flood.
884 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
885 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
886 if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
887 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
888 adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
889 amdgpu_ras_reset_gpu(adev);
891 return AMDGPU_RAS_SUCCESS;
894 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
895 struct amdgpu_irq_src *source,
896 struct amdgpu_iv_entry *entry)
898 struct ras_common_if *ras_if = adev->gfx.ras_if;
899 struct ras_dispatch_if ih_data = {
906 ih_data.head = *ras_if;
908 DRM_ERROR("CP ECC ERROR IRQ\n");
909 amdgpu_ras_interrupt_dispatch(adev, &ih_data);
913 void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
914 void *ras_error_status,
915 void (*func)(struct amdgpu_device *adev, void *ras_error_status,
919 int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
920 uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
921 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
924 err_data->ue_count = 0;
925 err_data->ce_count = 0;
928 for_each_inst(i, xcc_mask)
929 func(adev, ras_error_status, i);
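/* Read a register via the KIQ: an RREG packet is submitted on the KIQ ring,
 * the CP writes the register value into a writeback (wb) slot, and a polled
 * fence tells us when the value is available.
 */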
932 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
934 signed long r, cnt = 0;
936 uint32_t seq, reg_val_offs = 0, value = 0;
937 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
938 struct amdgpu_ring *ring = &kiq->ring;
940 if (amdgpu_device_skip_hw_access(adev))
943 if (adev->mes.ring.sched.ready)
944 return amdgpu_mes_rreg(adev, reg);
946 BUG_ON(!ring->funcs->emit_rreg);
948 spin_lock_irqsave(&kiq->ring_lock, flags);
949 if (amdgpu_device_wb_get(adev, ®_val_offs)) {
950 pr_err("critical bug! too many kiq readers\n");
953 amdgpu_ring_alloc(ring, 32);
954 amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
955 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
959 amdgpu_ring_commit(ring);
960 spin_unlock_irqrestore(&kiq->ring_lock, flags);
962 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
964 /* Don't keep waiting in the GPU reset case, because doing so may
965 * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
966 * is triggered from TTM and ttm_bo_lock_delayed_workqueue() will
967 * never return if we keep waiting in virt_kiq_rreg, which causes
968 * gpu_recover() to hang there.
970 * Also don't keep waiting when called from IRQ context.
972 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
973 goto failed_kiq_read;
976 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
977 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
978 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
981 if (cnt > MAX_KIQ_REG_TRY)
982 goto failed_kiq_read;
985 value = adev->wb.wb[reg_val_offs];
986 amdgpu_device_wb_free(adev, reg_val_offs);
990 amdgpu_ring_undo(ring);
992 spin_unlock_irqrestore(&kiq->ring_lock, flags);
995 amdgpu_device_wb_free(adev, reg_val_offs);
996 dev_err(adev->dev, "failed to read reg:%x\n", reg);
1000 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
1002 signed long r, cnt = 0;
1003 unsigned long flags;
1005 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
1006 struct amdgpu_ring *ring = &kiq->ring;
1008 BUG_ON(!ring->funcs->emit_wreg);
1010 if (amdgpu_device_skip_hw_access(adev))
1013 if (adev->mes.ring.sched.ready) {
1014 amdgpu_mes_wreg(adev, reg, v);
1018 spin_lock_irqsave(&kiq->ring_lock, flags);
1019 amdgpu_ring_alloc(ring, 32);
1020 amdgpu_ring_emit_wreg(ring, reg, v);
1021 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
1025 amdgpu_ring_commit(ring);
1026 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1028 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1030 /* Don't keep waiting in the GPU reset case, because doing so may
1031 * block the gpu_recover() routine forever, e.g. this kiq access
1032 * is triggered from TTM and ttm_bo_lock_delayed_workqueue() will
1033 * never return if we keep waiting here, which causes
1034 * gpu_recover() to hang there.
1036 * Also don't keep waiting when called from IRQ context.
1038 if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
1039 goto failed_kiq_write;
1042 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
1044 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
1045 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
1048 if (cnt > MAX_KIQ_REG_TRY)
1049 goto failed_kiq_write;
1054 amdgpu_ring_undo(ring);
1055 spin_unlock_irqrestore(&kiq->ring_lock, flags);
1057 dev_err(adev->dev, "failed to write reg:%x\n", reg);
1060 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
1062 if (amdgpu_num_kcq == -1) {
1064 } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
1065 dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
1068 return amdgpu_num_kcq;
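/* Record CP firmware version/feature info from the firmware header (v1_0 for
 * legacy CP microcode, v2_0 for RS64 microcode) and, when PSP front-door
 * loading is used, register the ucode and account for its size.
 */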
1071 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
1074 const struct gfx_firmware_header_v1_0 *cp_hdr;
1075 const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
1076 struct amdgpu_firmware_info *info = NULL;
1077 const struct firmware *ucode_fw;
1078 unsigned int fw_size;
1081 case AMDGPU_UCODE_ID_CP_PFP:
1082 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1083 adev->gfx.pfp_fw->data;
1084 adev->gfx.pfp_fw_version =
1085 le32_to_cpu(cp_hdr->header.ucode_version);
1086 adev->gfx.pfp_feature_version =
1087 le32_to_cpu(cp_hdr->ucode_feature_version);
1088 ucode_fw = adev->gfx.pfp_fw;
1089 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1091 case AMDGPU_UCODE_ID_CP_RS64_PFP:
1092 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1093 adev->gfx.pfp_fw->data;
1094 adev->gfx.pfp_fw_version =
1095 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1096 adev->gfx.pfp_feature_version =
1097 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1098 ucode_fw = adev->gfx.pfp_fw;
1099 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1101 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
1102 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1103 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1104 adev->gfx.pfp_fw->data;
1105 ucode_fw = adev->gfx.pfp_fw;
1106 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1108 case AMDGPU_UCODE_ID_CP_ME:
1109 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1110 adev->gfx.me_fw->data;
1111 adev->gfx.me_fw_version =
1112 le32_to_cpu(cp_hdr->header.ucode_version);
1113 adev->gfx.me_feature_version =
1114 le32_to_cpu(cp_hdr->ucode_feature_version);
1115 ucode_fw = adev->gfx.me_fw;
1116 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1118 case AMDGPU_UCODE_ID_CP_RS64_ME:
1119 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1120 adev->gfx.me_fw->data;
1121 adev->gfx.me_fw_version =
1122 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1123 adev->gfx.me_feature_version =
1124 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1125 ucode_fw = adev->gfx.me_fw;
1126 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1128 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1129 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1130 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1131 adev->gfx.me_fw->data;
1132 ucode_fw = adev->gfx.me_fw;
1133 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1135 case AMDGPU_UCODE_ID_CP_CE:
1136 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1137 adev->gfx.ce_fw->data;
1138 adev->gfx.ce_fw_version =
1139 le32_to_cpu(cp_hdr->header.ucode_version);
1140 adev->gfx.ce_feature_version =
1141 le32_to_cpu(cp_hdr->ucode_feature_version);
1142 ucode_fw = adev->gfx.ce_fw;
1143 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1145 case AMDGPU_UCODE_ID_CP_MEC1:
1146 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1147 adev->gfx.mec_fw->data;
1148 adev->gfx.mec_fw_version =
1149 le32_to_cpu(cp_hdr->header.ucode_version);
1150 adev->gfx.mec_feature_version =
1151 le32_to_cpu(cp_hdr->ucode_feature_version);
1152 ucode_fw = adev->gfx.mec_fw;
1153 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1154 le32_to_cpu(cp_hdr->jt_size) * 4;
1156 case AMDGPU_UCODE_ID_CP_MEC1_JT:
1157 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1158 adev->gfx.mec_fw->data;
1159 ucode_fw = adev->gfx.mec_fw;
1160 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1162 case AMDGPU_UCODE_ID_CP_MEC2:
1163 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1164 adev->gfx.mec2_fw->data;
1165 adev->gfx.mec2_fw_version =
1166 le32_to_cpu(cp_hdr->header.ucode_version);
1167 adev->gfx.mec2_feature_version =
1168 le32_to_cpu(cp_hdr->ucode_feature_version);
1169 ucode_fw = adev->gfx.mec2_fw;
1170 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1171 le32_to_cpu(cp_hdr->jt_size) * 4;
1173 case AMDGPU_UCODE_ID_CP_MEC2_JT:
1174 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1175 adev->gfx.mec2_fw->data;
1176 ucode_fw = adev->gfx.mec2_fw;
1177 fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
1179 case AMDGPU_UCODE_ID_CP_RS64_MEC:
1180 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1181 adev->gfx.mec_fw->data;
1182 adev->gfx.mec_fw_version =
1183 le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
1184 adev->gfx.mec_feature_version =
1185 le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
1186 ucode_fw = adev->gfx.mec_fw;
1187 fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
1189 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1190 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1191 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1192 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1193 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
1194 adev->gfx.mec_fw->data;
1195 ucode_fw = adev->gfx.mec_fw;
1196 fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
1202 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1203 info = &adev->firmware.ucode[ucode_id];
1204 info->ucode_id = ucode_id;
1205 info->fw = ucode_fw;
1206 adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
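/* An XCC is considered the "master" of its partition when its index is a
 * multiple of num_xcc_per_xcp (every XCC qualifies if that count is 0 or 1).
 */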
1210 bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
1212 return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
1213 adev->gfx.num_xcc_per_xcp : 1));
1216 static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
1217 struct device_attribute *addr,
1220 struct drm_device *ddev = dev_get_drvdata(dev);
1221 struct amdgpu_device *adev = drm_to_adev(ddev);
1224 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1225 AMDGPU_XCP_FL_NONE);
1227 return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
1230 static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
1231 struct device_attribute *addr,
1232 const char *buf, size_t count)
1234 struct drm_device *ddev = dev_get_drvdata(dev);
1235 struct amdgpu_device *adev = drm_to_adev(ddev);
1236 enum amdgpu_gfx_partition mode;
1237 int ret = 0, num_xcc;
1239 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1240 if (num_xcc % 2 != 0)
1243 if (!strncasecmp("SPX", buf, strlen("SPX"))) {
1244 mode = AMDGPU_SPX_PARTITION_MODE;
1245 } else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
1247 * DPX mode needs the number of AIDs to be a multiple of 2;
1248 * each AID connects 2 XCCs.
1252 mode = AMDGPU_DPX_PARTITION_MODE;
1253 } else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
1256 mode = AMDGPU_TPX_PARTITION_MODE;
1257 } else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
1260 mode = AMDGPU_QPX_PARTITION_MODE;
1261 } else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
1262 mode = AMDGPU_CPX_PARTITION_MODE;
1267 ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
1275 static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
1276 struct device_attribute *addr,
1279 struct drm_device *ddev = dev_get_drvdata(dev);
1280 struct amdgpu_device *adev = drm_to_adev(ddev);
1281 char *supported_partition;
1284 switch (NUM_XCC(adev->gfx.xcc_mask)) {
1286 supported_partition = "SPX, DPX, QPX, CPX";
1289 supported_partition = "SPX, TPX, CPX";
1292 supported_partition = "SPX, DPX, CPX";
1294 /* this seems to exist only in the emulation phase */
1296 supported_partition = "SPX, CPX";
1299 supported_partition = "Not supported";
1303 return sysfs_emit(buf, "%s\n", supported_partition);
1306 static DEVICE_ATTR(current_compute_partition, 0644,
1307 amdgpu_gfx_get_current_compute_partition,
1308 amdgpu_gfx_set_compute_partition);
1310 static DEVICE_ATTR(available_compute_partition, 0444,
1311 amdgpu_gfx_get_available_compute_partition, NULL);
1313 int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
1317 r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
1321 r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
1326 void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
1328 device_remove_file(adev->dev, &dev_attr_current_compute_partition);
1329 device_remove_file(adev->dev, &dev_attr_available_compute_partition);