2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
25 #include <linux/pci.h>
27 #include <drm/drm_cache.h>
31 #include "amdgpu_atomfirmware.h"
32 #include "amdgpu_gem.h"
34 #include "gc/gc_9_0_sh_mask.h"
35 #include "dce/dce_12_0_offset.h"
36 #include "dce/dce_12_0_sh_mask.h"
37 #include "vega10_enum.h"
38 #include "mmhub/mmhub_1_0_offset.h"
39 #include "athub/athub_1_0_sh_mask.h"
40 #include "athub/athub_1_0_offset.h"
41 #include "oss/osssys_4_0_offset.h"
45 #include "soc15_common.h"
46 #include "umc/umc_6_0_sh_mask.h"
48 #include "gfxhub_v1_0.h"
49 #include "mmhub_v1_0.h"
50 #include "athub_v1_0.h"
51 #include "gfxhub_v1_1.h"
52 #include "gfxhub_v1_2.h"
53 #include "mmhub_v9_4.h"
54 #include "mmhub_v1_7.h"
55 #include "mmhub_v1_8.h"
62 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
64 #include "amdgpu_ras.h"
65 #include "amdgpu_xgmi.h"
67 #include "amdgpu_reset.h"
69 /* add these here since we already include dce12 headers and these are for DCN */
70 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
71 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
72 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
73 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
74 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
75 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
76 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
77 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
79 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
80 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
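/* Maximum number of memory partition ranges tracked per device */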
82 #define MAX_MEM_RANGES 8
84 static const char *gfxhub_client_ids[] = {
100 static const char *mmhub_client_ids_raven[][2] = {
125 static const char *mmhub_client_ids_renoir[][2] = {
153 static const char *mmhub_client_ids_vega10[][2] = {
166 [32+14][0] = "SDMA0",
179 [32+4][1] = "DCEDWB",
182 [32+14][1] = "SDMA1",
185 static const char *mmhub_client_ids_vega12[][2] = {
198 [32+15][0] = "SDMA0",
208 [32+1][1] = "DCEDWB",
214 [32+15][1] = "SDMA1",
217 static const char *mmhub_client_ids_vega20[][2] = {
231 [32+12][0] = "UTCL2",
232 [32+14][0] = "SDMA1",
250 [32+14][1] = "SDMA1",
253 static const char *mmhub_client_ids_arcturus[][2] = {
294 static const char *mmhub_client_ids_aldebaran[][2] = {
297 [32+1][0] = "DBGU_IO0",
298 [32+2][0] = "DBGU_IO2",
300 [96+11][0] = "JPEG0",
302 [96+13][0] = "VCNU0",
303 [128+11][0] = "JPEG1",
304 [128+12][0] = "VCN1",
305 [128+13][0] = "VCNU1",
308 [256+0][0] = "SDMA0",
309 [256+1][0] = "SDMA1",
310 [256+2][0] = "SDMA2",
311 [256+3][0] = "SDMA3",
312 [256+4][0] = "SDMA4",
316 [32+1][1] = "DBGU_IO0",
317 [32+2][1] = "DBGU_IO2",
319 [96+11][1] = "JPEG0",
321 [96+13][1] = "VCNU0",
322 [128+11][1] = "JPEG1",
323 [128+12][1] = "VCN1",
324 [128+13][1] = "VCNU1",
327 [256+0][1] = "SDMA0",
328 [256+1][1] = "SDMA1",
329 [256+2][1] = "SDMA2",
330 [256+3][1] = "SDMA3",
331 [256+4][1] = "SDMA4",
335 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
337 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
338 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
341 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
343 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
344 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
347 static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
348 (0x000143c0 + 0x00000000),
349 (0x000143c0 + 0x00000800),
350 (0x000143c0 + 0x00001000),
351 (0x000143c0 + 0x00001800),
352 (0x000543c0 + 0x00000000),
353 (0x000543c0 + 0x00000800),
354 (0x000543c0 + 0x00001000),
355 (0x000543c0 + 0x00001800),
356 (0x000943c0 + 0x00000000),
357 (0x000943c0 + 0x00000800),
358 (0x000943c0 + 0x00001000),
359 (0x000943c0 + 0x00001800),
360 (0x000d43c0 + 0x00000000),
361 (0x000d43c0 + 0x00000800),
362 (0x000d43c0 + 0x00001000),
363 (0x000d43c0 + 0x00001800),
364 (0x001143c0 + 0x00000000),
365 (0x001143c0 + 0x00000800),
366 (0x001143c0 + 0x00001000),
367 (0x001143c0 + 0x00001800),
368 (0x001543c0 + 0x00000000),
369 (0x001543c0 + 0x00000800),
370 (0x001543c0 + 0x00001000),
371 (0x001543c0 + 0x00001800),
372 (0x001943c0 + 0x00000000),
373 (0x001943c0 + 0x00000800),
374 (0x001943c0 + 0x00001000),
375 (0x001943c0 + 0x00001800),
376 (0x001d43c0 + 0x00000000),
377 (0x001d43c0 + 0x00000800),
378 (0x001d43c0 + 0x00001000),
379 (0x001d43c0 + 0x00001800),
382 static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
383 (0x000143e0 + 0x00000000),
384 (0x000143e0 + 0x00000800),
385 (0x000143e0 + 0x00001000),
386 (0x000143e0 + 0x00001800),
387 (0x000543e0 + 0x00000000),
388 (0x000543e0 + 0x00000800),
389 (0x000543e0 + 0x00001000),
390 (0x000543e0 + 0x00001800),
391 (0x000943e0 + 0x00000000),
392 (0x000943e0 + 0x00000800),
393 (0x000943e0 + 0x00001000),
394 (0x000943e0 + 0x00001800),
395 (0x000d43e0 + 0x00000000),
396 (0x000d43e0 + 0x00000800),
397 (0x000d43e0 + 0x00001000),
398 (0x000d43e0 + 0x00001800),
399 (0x001143e0 + 0x00000000),
400 (0x001143e0 + 0x00000800),
401 (0x001143e0 + 0x00001000),
402 (0x001143e0 + 0x00001800),
403 (0x001543e0 + 0x00000000),
404 (0x001543e0 + 0x00000800),
405 (0x001543e0 + 0x00001000),
406 (0x001543e0 + 0x00001800),
407 (0x001943e0 + 0x00000000),
408 (0x001943e0 + 0x00000800),
409 (0x001943e0 + 0x00001000),
410 (0x001943e0 + 0x00001800),
411 (0x001d43e0 + 0x00000000),
412 (0x001d43e0 + 0x00000800),
413 (0x001d43e0 + 0x00001000),
414 (0x001d43e0 + 0x00001800),
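/*
 * gmc_v9_0_ecc_interrupt_state - enable/disable the UMC ECC error interrupt
 *
 * Toggles the error-interrupt enable bits in the MCUMC control and mask
 * registers listed above. Only relevant for VEGA10/VEGA12; on newer ASICs
 * the PSP bootloader programs these sequences instead.
 */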
417 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
418 struct amdgpu_irq_src *src,
420 enum amdgpu_interrupt_state state)
422 u32 bits, i, tmp, reg;
424 /* Devices newer than VEGA10/12 shall have these programming
425 sequences performed by the PSP BL */
426 if (adev->asic_type >= CHIP_VEGA20)
432 case AMDGPU_IRQ_STATE_DISABLE:
433 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
434 reg = ecc_umc_mcumc_ctrl_addrs[i];
439 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
440 reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
446 case AMDGPU_IRQ_STATE_ENABLE:
447 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
448 reg = ecc_umc_mcumc_ctrl_addrs[i];
453 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
454 reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
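/*
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM protection fault interrupts
 *
 * Sets or clears the protection-fault interrupt enable bits in the
 * per-context VM_CONTEXT*_CNTL registers of every populated VM hub.
 * The GFX hub is skipped while in S0ix.
 */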
467 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
468 struct amdgpu_irq_src *src,
470 enum amdgpu_interrupt_state state)
472 struct amdgpu_vmhub *hub;
473 u32 tmp, reg, bits, i, j;
475 bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
476 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
477 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
478 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
479 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
480 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
481 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
484 case AMDGPU_IRQ_STATE_DISABLE:
485 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
486 hub = &adev->vmhub[j];
487 for (i = 0; i < 16; i++) {
488 reg = hub->vm_context0_cntl + i;
490 /* This works because this interrupt is only
491 * enabled at init/resume and disabled in
492 * fini/suspend, so the overall state doesn't
493 * change over the course of suspend/resume.
495 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
498 if (j >= AMDGPU_MMHUB0(0))
499 tmp = RREG32_SOC15_IP(MMHUB, reg);
501 tmp = RREG32_SOC15_IP(GC, reg);
505 if (j >= AMDGPU_MMHUB0(0))
506 WREG32_SOC15_IP(MMHUB, reg, tmp);
508 WREG32_SOC15_IP(GC, reg, tmp);
512 case AMDGPU_IRQ_STATE_ENABLE:
513 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
514 hub = &adev->vmhub[j];
515 for (i = 0; i < 16; i++) {
516 reg = hub->vm_context0_cntl + i;
518 /* This works because this interrupt is only
519 * enabled at init/resume and disabled in
520 * fini/suspend, so the overall state doesn't
521 * change over the course of suspend/resume.
523 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
526 if (j >= AMDGPU_MMHUB0(0))
527 tmp = RREG32_SOC15_IP(MMHUB, reg);
529 tmp = RREG32_SOC15_IP(GC, reg);
533 if (j >= AMDGPU_MMHUB0(0))
534 WREG32_SOC15_IP(MMHUB, reg, tmp);
536 WREG32_SOC15_IP(GC, reg, tmp);
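/*
 * gmc_v9_0_process_interrupt - handle a VM protection fault interrupt
 *
 * Reassembles the faulting address from the IH cookie, delegates retry
 * faults that the hardware has not already handled, filters duplicate
 * faults, and otherwise prints a rate-limited description of the fault,
 * including the client ID and the protection fault status.
 */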
547 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
548 struct amdgpu_irq_src *source,
549 struct amdgpu_iv_entry *entry)
551 bool retry_fault = !!(entry->src_data[1] & 0x80);
552 bool write_fault = !!(entry->src_data[1] & 0x20);
553 uint32_t status = 0, cid = 0, rw = 0;
554 struct amdgpu_task_info task_info;
555 struct amdgpu_vmhub *hub;
556 const char *mmhub_cid;
557 const char *hub_name;
559 uint32_t cam_index = 0;
563 node_id = entry->node_id;
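/* The faulting page address: src_data[0] carries bits 43:12 and the low
 * nibble of src_data[1] carries bits 47:44.
 */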
565 addr = (u64)entry->src_data[0] << 12;
566 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
568 if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
570 hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)];
571 } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
573 hub = &adev->vmhub[AMDGPU_MMHUB1(0)];
575 hub_name = "gfxhub0";
576 if (adev->gfx.funcs->ih_node_to_logical_xcc) {
577 xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
582 hub = &adev->vmhub[xcc_id];
586 if (adev->irq.retry_cam_enabled) {
587 /* Delegate it to a different ring if the hardware hasn't
590 if (entry->ih == &adev->irq.ih) {
591 amdgpu_irq_delegate(adev, entry, 8);
595 cam_index = entry->src_data[2] & 0x3ff;
597 ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
599 WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
603 /* Process it only if it's the first fault for this address */
604 if (entry->ih != &adev->irq.ih_soft &&
605 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
609 /* Delegate it to a different ring if the hardware hasn't
612 if (entry->ih == &adev->irq.ih) {
613 amdgpu_irq_delegate(adev, entry, 8);
617 /* Try to handle the recoverable page faults by filling page
620 if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
626 if (!printk_ratelimit())
630 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
631 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
634 "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
635 "pasid:%u, for process %s pid %d thread %s pid %d)\n",
636 hub_name, retry_fault ? "retry" : "no-retry",
637 entry->src_id, entry->ring_id, entry->vmid,
638 entry->pasid, task_info.process_name, task_info.tgid,
639 task_info.task_name, task_info.pid);
640 dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
641 addr, entry->client_id,
642 soc15_ih_clientid_name[entry->client_id]);
644 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
645 dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
646 node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
647 node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
649 if (amdgpu_sriov_vf(adev))
653 * Issue a dummy read to wait for the status register to
654 * be updated to avoid reading an incorrect value due to
655 * the new fast GRBM interface.
657 if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
658 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
659 RREG32(hub->vm_l2_pro_fault_status);
661 status = RREG32(hub->vm_l2_pro_fault_status);
662 cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
663 rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
664 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
667 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
669 if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
670 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
671 cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
672 gfxhub_client_ids[cid],
675 switch (adev->ip_versions[MMHUB_HWIP][0]) {
676 case IP_VERSION(9, 0, 0):
677 mmhub_cid = mmhub_client_ids_vega10[cid][rw];
679 case IP_VERSION(9, 3, 0):
680 mmhub_cid = mmhub_client_ids_vega12[cid][rw];
682 case IP_VERSION(9, 4, 0):
683 mmhub_cid = mmhub_client_ids_vega20[cid][rw];
685 case IP_VERSION(9, 4, 1):
686 mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
688 case IP_VERSION(9, 1, 0):
689 case IP_VERSION(9, 2, 0):
690 mmhub_cid = mmhub_client_ids_raven[cid][rw];
692 case IP_VERSION(1, 5, 0):
693 case IP_VERSION(2, 4, 0):
694 mmhub_cid = mmhub_client_ids_renoir[cid][rw];
696 case IP_VERSION(1, 8, 0):
697 case IP_VERSION(9, 4, 2):
698 mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
704 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
705 mmhub_cid ? mmhub_cid : "unknown", cid);
707 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
708 REG_GET_FIELD(status,
709 VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
710 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
711 REG_GET_FIELD(status,
712 VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
713 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
714 REG_GET_FIELD(status,
715 VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
716 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
717 REG_GET_FIELD(status,
718 VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
719 dev_err(adev->dev, "\t RW: 0x%x\n", rw);
723 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
724 .set = gmc_v9_0_vm_fault_interrupt_state,
725 .process = gmc_v9_0_process_interrupt,
729 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
730 .set = gmc_v9_0_ecc_interrupt_state,
731 .process = amdgpu_umc_process_ecc_irq,
734 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
736 adev->gmc.vm_fault.num_types = 1;
737 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
739 if (!amdgpu_sriov_vf(adev) &&
740 !adev->gmc.xgmi.connected_to_cpu) {
741 adev->gmc.ecc_irq.num_types = 1;
742 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
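/*
 * gmc_v9_0_get_invalidate_req - build a VM invalidation request
 *
 * Packs the per-VMID invalidate bit, the flush type and the cache levels
 * to invalidate (L2 PTEs, PDE0/1/2 and L1 PTEs) into a
 * VM_INVALIDATE_ENG*_REQ register value, without clearing the protection
 * fault status.
 */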
746 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
751 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
752 PER_VMID_INVALIDATE_REQ, 1 << vmid);
753 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
754 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
755 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
756 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
757 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
758 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
759 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
760 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
766 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
768 * @adev: amdgpu_device pointer
772 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
775 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
776 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
779 return ((vmhub == AMDGPU_MMHUB0(0) ||
780 vmhub == AMDGPU_MMHUB1(0)) &&
781 (!amdgpu_sriov_vf(adev)) &&
782 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
783 (adev->apu_flags & AMD_APU_IS_PICASSO))));
786 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
787 uint8_t vmid, uint16_t *p_pasid)
791 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
793 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
795 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
800 * VMID 0 is the physical GPU addresses as used by the kernel.
801 * VMIDs 1-15 are used for userspace clients and are handled
802 * by the amdgpu vm/hsa code.
806 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
808 * @adev: amdgpu_device pointer
809 * @vmid: vm instance to flush
810 * @vmhub: which hub to flush
811 * @flush_type: the flush type
813 * Flush the TLB for the requested page table using a certain flush type.
815 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
816 uint32_t vmhub, uint32_t flush_type)
818 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
819 const unsigned eng = 17;
820 u32 j, inv_req, inv_req2, tmp;
821 struct amdgpu_vmhub *hub;
823 BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);
825 hub = &adev->vmhub[vmhub];
826 if (adev->gmc.xgmi.num_physical_nodes &&
827 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
828 /* Vega20+XGMI caches PTEs in TC and TLB. Add a
829 * heavy-weight TLB flush (type 2), which flushes
830 * both. Due to a race condition with concurrent
831 * memory accesses using the same TLB cache line, we
832 * still need a second TLB flush after this.
834 inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
835 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
836 } else if (flush_type == 2 &&
837 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&
839 inv_req = gmc_v9_0_get_invalidate_req(vmid, 0);
840 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
842 inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
846 /* This is necessary for a HW workaround under SRIOV as well
847 * as GFXOFF under bare metal
849 if (adev->gfx.kiq[0].ring.sched.ready &&
850 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
851 down_read_trylock(&adev->reset_domain->sem)) {
852 uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
853 uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
855 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
857 up_read(&adev->reset_domain->sem);
861 spin_lock(&adev->gmc.invalidate_lock);
864 * The GPU may lose the gpuvm invalidate acknowledge state across a
865 * power-gating off cycle, so add a semaphore acquire before invalidation
866 * and a semaphore release after invalidation to avoid entering a power-gated state
870 /* TODO: Use of the semaphore for GFXHUB still needs further debugging. */
872 for (j = 0; j < adev->usec_timeout; j++) {
873 /* a read return value of 1 means semaphore acquire */
874 if (vmhub >= AMDGPU_MMHUB0(0))
875 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
877 tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
883 if (j >= adev->usec_timeout)
884 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
888 if (vmhub >= AMDGPU_MMHUB0(0))
889 WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
891 WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
894 * Issue a dummy read to wait for the ACK register to
895 * be cleared to avoid a false ACK due to the new fast
898 if ((vmhub == AMDGPU_GFXHUB(0)) &&
899 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
900 RREG32_NO_KIQ(hub->vm_inv_eng0_req +
901 hub->eng_distance * eng);
903 for (j = 0; j < adev->usec_timeout; j++) {
904 if (vmhub >= AMDGPU_MMHUB0(0))
905 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
907 tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
908 if (tmp & (1 << vmid))
917 /* TODO: Use of the semaphore for GFXHUB still needs further debugging. */
920 * add semaphore release after invalidation,
921 * write with 0 means semaphore release
923 if (vmhub >= AMDGPU_MMHUB0(0))
924 WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
926 WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
929 spin_unlock(&adev->gmc.invalidate_lock);
931 if (j < adev->usec_timeout)
934 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
938 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
940 * @adev: amdgpu_device pointer
941 * @pasid: pasid to be flushed
942 * @flush_type: the flush type
943 * @all_hub: flush all hubs
944 * @inst: is used to select which instance of KIQ to use for the invalidation
946 * Flush the TLB for the requested pasid.
948 static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
949 uint16_t pasid, uint32_t flush_type,
950 bool all_hub, uint32_t inst)
955 uint16_t queried_pasid;
957 u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
958 struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
959 struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
961 if (amdgpu_in_reset(adev))
964 if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) {
965 /* Vega20+XGMI caches PTEs in TC and TLB. Add a
966 * heavy-weight TLB flush (type 2), which flushes
967 * both. Due to a race condition with concurrent
968 * memory accesses using the same TLB cache line, we
969 * still need a second TLB flush after this.
971 bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
972 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
973 /* 2 dwords flush + 8 dwords fence */
974 unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
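/* When the Vega20 XGMI workaround is needed, a second invalidation is
 * emitted, so reserve extra space for it on the ring.
 */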
977 ndw += kiq->pmf->invalidate_tlbs_size;
979 spin_lock(&adev->gfx.kiq[inst].ring_lock);
980 /* 2 dwords flush + 8 dwords fence */
981 amdgpu_ring_alloc(ring, ndw);
983 kiq->pmf->kiq_invalidate_tlbs(ring,
986 if (flush_type == 2 &&
987 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&
989 kiq->pmf->kiq_invalidate_tlbs(ring,
992 kiq->pmf->kiq_invalidate_tlbs(ring,
993 pasid, flush_type, all_hub);
994 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
996 amdgpu_ring_undo(ring);
997 spin_unlock(&adev->gfx.kiq[inst].ring_lock);
998 up_read(&adev->reset_domain->sem);
1002 amdgpu_ring_commit(ring);
1003 spin_unlock(&adev->gfx.kiq[inst].ring_lock);
1004 r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
1006 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
1007 up_read(&adev->reset_domain->sem);
1010 up_read(&adev->reset_domain->sem);
1014 for (vmid = 1; vmid < 16; vmid++) {
1016 ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
1018 if (ret && queried_pasid == pasid) {
1020 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
1021 gmc_v9_0_flush_gpu_tlb(adev, vmid,
1024 gmc_v9_0_flush_gpu_tlb(adev, vmid,
1025 AMDGPU_GFXHUB(0), flush_type);
1035 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
1036 unsigned vmid, uint64_t pd_addr)
1038 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
1039 struct amdgpu_device *adev = ring->adev;
1040 struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
1041 uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
1042 unsigned eng = ring->vm_inv_eng;
1045 * The GPU may lose the gpuvm invalidate acknowledge state across a
1046 * power-gating off cycle, so add a semaphore acquire before invalidation
1047 * and a semaphore release after invalidation to avoid entering a power-gated state
1051 /* TODO: Use of the semaphore for GFXHUB still needs further debugging. */
1053 /* a read return value of 1 means semaphore acquire */
1054 amdgpu_ring_emit_reg_wait(ring,
1055 hub->vm_inv_eng0_sem +
1056 hub->eng_distance * eng, 0x1, 0x1);
1058 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
1059 (hub->ctx_addr_distance * vmid),
1060 lower_32_bits(pd_addr));
1062 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
1063 (hub->ctx_addr_distance * vmid),
1064 upper_32_bits(pd_addr));
1066 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
1067 hub->eng_distance * eng,
1068 hub->vm_inv_eng0_ack +
1069 hub->eng_distance * eng,
1072 /* TODO: Use of the semaphore for GFXHUB still needs further debugging. */
1075 * add semaphore release after invalidation,
1076 * write with 0 means semaphore release
1078 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
1079 hub->eng_distance * eng, 0);
1084 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
1087 struct amdgpu_device *adev = ring->adev;
1090 /* Do nothing because there's no lut register for mmhub1. */
1091 if (ring->vm_hub == AMDGPU_MMHUB1(0))
1094 if (ring->vm_hub == AMDGPU_GFXHUB(0))
1095 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
1097 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
1099 amdgpu_ring_emit_wreg(ring, reg, pasid);
1103 * PTE format on VEGA 10:
1112 * 47:12 4k physical page base address
1122 * PDE format on VEGA 10:
1123 * 63:59 block fragment size
1127 * 47:6 physical base address of PD or PTE
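 *
 * For illustration only (not part of the driver logic): a PTE for a 4 KiB
 * page at physical address 'pa' would roughly look like
 *   (pa & ~0xfffULL) | AMDGPU_PTE_VALID | AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)
 * using the PTE flag helpers referenced elsewhere in this file.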
1134 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
1138 case AMDGPU_VM_MTYPE_DEFAULT:
1139 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1140 case AMDGPU_VM_MTYPE_NC:
1141 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1142 case AMDGPU_VM_MTYPE_WC:
1143 return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
1144 case AMDGPU_VM_MTYPE_RW:
1145 return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
1146 case AMDGPU_VM_MTYPE_CC:
1147 return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
1148 case AMDGPU_VM_MTYPE_UC:
1149 return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
1151 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1155 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
1156 uint64_t *addr, uint64_t *flags)
1158 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
1159 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
1160 BUG_ON(*addr & 0xFFFF00000000003FULL);
1162 if (!adev->gmc.translate_further)
1165 if (level == AMDGPU_VM_PDB1) {
1166 /* Set the block fragment size */
1167 if (!(*flags & AMDGPU_PDE_PTE))
1168 *flags |= AMDGPU_PDE_BFS(0x9);
1170 } else if (level == AMDGPU_VM_PDB0) {
1171 if (*flags & AMDGPU_PDE_PTE) {
1172 *flags &= ~AMDGPU_PDE_PTE;
1173 if (!(*flags & AMDGPU_PTE_VALID))
1174 *addr |= 1 << PAGE_SHIFT;
1176 *flags |= AMDGPU_PTE_TF;
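/*
 * gmc_v9_0_get_coherence_flags - pick MTYPE and snoop bits for a mapping
 *
 * Chooses the PTE memory type based on the GC IP version, whether the BO
 * lives in local VRAM or on a remote XGMI node, the BO's coherent/uncached
 * flags and, on GFX 9.4.3, whether the memory is local to the VM's partition.
 */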
1181 static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
1182 struct amdgpu_bo *bo,
1183 struct amdgpu_bo_va_mapping *mapping,
1186 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1187 bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
1188 bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
1189 bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
1190 struct amdgpu_vm *vm = mapping->bo_va->base.vm;
1191 unsigned int mtype_local, mtype;
1195 switch (adev->ip_versions[GC_HWIP][0]) {
1196 case IP_VERSION(9, 4, 1):
1197 case IP_VERSION(9, 4, 2):
1199 if (bo_adev == adev) {
1206 /* FIXME: is this still needed? Or does
1207 * amdgpu_ttm_tt_pde_flags already handle this?
1209 if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
1210 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) &&
1211 adev->gmc.xgmi.connected_to_cpu)
1214 if (uncached || coherent)
1218 if (mapping->bo_va->is_xgmi)
1222 if (uncached || coherent)
1226 /* FIXME: is this still needed? Or does
1227 * amdgpu_ttm_tt_pde_flags already handle this?
1232 case IP_VERSION(9, 4, 3):
1233 /* Only local VRAM BOs or system memory on non-NUMA APUs
1234 * can be assumed to be local in their entirety. Choose
1235 * MTYPE_NC as safe fallback for all system memory BOs on
1236 * NUMA systems. Their MTYPE can be overridden per-page in
1237 * gmc_v9_0_override_vm_pte_flags.
1239 mtype_local = MTYPE_RW;
1240 if (amdgpu_mtype_local == 1) {
1241 DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
1242 mtype_local = MTYPE_NC;
1243 } else if (amdgpu_mtype_local == 2) {
1244 DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
1245 mtype_local = MTYPE_CC;
1247 DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
1249 is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
1250 num_possible_nodes() <= 1) ||
1251 (is_vram && adev == bo_adev &&
1252 KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
1256 } else if (adev->flags & AMD_IS_APU) {
1257 mtype = is_local ? mtype_local : MTYPE_NC;
1261 mtype = mtype_local;
1270 if (uncached || coherent)
1275 /* FIXME: is this still needed? Or does
1276 * amdgpu_ttm_tt_pde_flags already handle this?
1282 if (mtype != MTYPE_NC)
1283 *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
1284 AMDGPU_PTE_MTYPE_VG10(mtype);
1285 *flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1288 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
1289 struct amdgpu_bo_va_mapping *mapping,
1292 struct amdgpu_bo *bo = mapping->bo_va->base.bo;
1294 *flags &= ~AMDGPU_PTE_EXECUTABLE;
1295 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1297 *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1298 *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
1300 if (mapping->flags & AMDGPU_PTE_PRT) {
1301 *flags |= AMDGPU_PTE_PRT;
1302 *flags &= ~AMDGPU_PTE_VALID;
1305 if (bo && bo->tbo.resource)
1306 gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
1310 static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
1311 struct amdgpu_vm *vm,
1312 uint64_t addr, uint64_t *flags)
1314 int local_node, nid;
1316 /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
1317 * memory can use more efficient MTYPEs.
1319 if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
1322 /* Only direct-mapped memory allows us to determine the NUMA node from
1325 if (!adev->ram_is_direct_mapped) {
1326 dev_dbg(adev->dev, "RAM is not direct mapped\n");
1330 /* Only override mappings with MTYPE_NC, which is the safe default for
1333 if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
1334 AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
1335 dev_dbg(adev->dev, "MTYPE is not NC\n");
1339 /* FIXME: Only supported on native mode for now. For carve-out, the
1340 * NUMA affinity of the GPU/VM needs to come from the PCI info because
1341 * memory partitions are not associated with different NUMA nodes.
1343 if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
1344 local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
1346 dev_dbg(adev->dev, "Only native mode APU is supported.\n");
1350 /* Only handle real RAM. Mappings of PCIe resources don't have struct
1351 * page or NUMA nodes.
1353 if (!page_is_ram(addr >> PAGE_SHIFT)) {
1354 dev_dbg(adev->dev, "Page is not RAM.\n");
1357 nid = pfn_to_nid(addr >> PAGE_SHIFT);
1358 dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
1359 vm->mem_id, local_node, nid);
1360 if (nid == local_node) {
1361 uint64_t old_flags = *flags;
1362 unsigned int mtype_local = MTYPE_RW;
1364 if (amdgpu_mtype_local == 1)
1365 mtype_local = MTYPE_NC;
1366 else if (amdgpu_mtype_local == 2)
1367 mtype_local = MTYPE_CC;
1369 *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
1370 AMDGPU_PTE_MTYPE_VG10(mtype_local);
1371 dev_dbg(adev->dev, "flags updated from %llx to %llx\n",
1376 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
1378 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
1381 /* TODO move to DC so GMC doesn't need to hard-code DCN registers */
1383 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1384 size = AMDGPU_VBIOS_VGA_ALLOCATION;
1388 switch (adev->ip_versions[DCE_HWIP][0]) {
1389 case IP_VERSION(1, 0, 0):
1390 case IP_VERSION(1, 0, 1):
1391 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
1392 size = (REG_GET_FIELD(viewport,
1393 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1394 REG_GET_FIELD(viewport,
1395 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1398 case IP_VERSION(2, 1, 0):
1399 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
1400 size = (REG_GET_FIELD(viewport,
1401 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1402 REG_GET_FIELD(viewport,
1403 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1407 viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
1408 size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1409 REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1418 static enum amdgpu_memory_partition
1419 gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
1421 enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
1423 if (adev->nbio.funcs->get_memory_partition_mode)
1424 mode = adev->nbio.funcs->get_memory_partition_mode(adev,
1430 static enum amdgpu_memory_partition
1431 gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
1433 if (amdgpu_sriov_vf(adev))
1434 return AMDGPU_NPS1_PARTITION_MODE;
1436 return gmc_v9_0_get_memory_partition(adev, NULL);
1439 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1440 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1441 .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
1442 .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1443 .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
1444 .map_mtype = gmc_v9_0_map_mtype,
1445 .get_vm_pde = gmc_v9_0_get_vm_pde,
1446 .get_vm_pte = gmc_v9_0_get_vm_pte,
1447 .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
1448 .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
1449 .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
1452 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
1454 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
1457 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
1459 switch (adev->ip_versions[UMC_HWIP][0]) {
1460 case IP_VERSION(6, 0, 0):
1461 adev->umc.funcs = &umc_v6_0_funcs;
1463 case IP_VERSION(6, 1, 1):
1464 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1465 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1466 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1467 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
1468 adev->umc.retire_unit = 1;
1469 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1470 adev->umc.ras = &umc_v6_1_ras;
1472 case IP_VERSION(6, 1, 2):
1473 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1474 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1475 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1476 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
1477 adev->umc.retire_unit = 1;
1478 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1479 adev->umc.ras = &umc_v6_1_ras;
1481 case IP_VERSION(6, 7, 0):
1482 adev->umc.max_ras_err_cnt_per_query =
1483 UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
1484 adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
1485 adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
1486 adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
1487 adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
1488 if (!adev->gmc.xgmi.connected_to_cpu)
1489 adev->umc.ras = &umc_v6_7_ras;
1490 if (1 & adev->smuio.funcs->get_die_id(adev))
1491 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
1493 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
1500 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
1502 switch (adev->ip_versions[MMHUB_HWIP][0]) {
1503 case IP_VERSION(9, 4, 1):
1504 adev->mmhub.funcs = &mmhub_v9_4_funcs;
1506 case IP_VERSION(9, 4, 2):
1507 adev->mmhub.funcs = &mmhub_v1_7_funcs;
1509 case IP_VERSION(1, 8, 0):
1510 adev->mmhub.funcs = &mmhub_v1_8_funcs;
1513 adev->mmhub.funcs = &mmhub_v1_0_funcs;
1518 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
1520 switch (adev->ip_versions[MMHUB_HWIP][0]) {
1521 case IP_VERSION(9, 4, 0):
1522 adev->mmhub.ras = &mmhub_v1_0_ras;
1524 case IP_VERSION(9, 4, 1):
1525 adev->mmhub.ras = &mmhub_v9_4_ras;
1527 case IP_VERSION(9, 4, 2):
1528 adev->mmhub.ras = &mmhub_v1_7_ras;
1530 case IP_VERSION(1, 8, 0):
1531 adev->mmhub.ras = &mmhub_v1_8_ras;
1534 /* mmhub ras is not available */
1539 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
1541 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
1542 adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
1544 adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
1547 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
1549 adev->hdp.ras = &hdp_v4_0_ras;
1552 static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
1554 struct amdgpu_mca *mca = &adev->mca;
1556 /* is UMC the right IP to check for MCA? Maybe DF? */
1557 switch (adev->ip_versions[UMC_HWIP][0]) {
1558 case IP_VERSION(6, 7, 0):
1559 if (!adev->gmc.xgmi.connected_to_cpu) {
1560 mca->mp0.ras = &mca_v3_0_mp0_ras;
1561 mca->mp1.ras = &mca_v3_0_mp1_ras;
1562 mca->mpio.ras = &mca_v3_0_mpio_ras;
1570 static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
1572 if (!adev->gmc.xgmi.connected_to_cpu)
1573 adev->gmc.xgmi.ras = &xgmi_ras;
1576 static int gmc_v9_0_early_init(void *handle)
1578 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1581 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
1582 * in their IP discovery tables
1584 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) ||
1585 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
1586 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
1587 adev->gmc.xgmi.supported = true;
1589 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
1590 adev->gmc.xgmi.supported = true;
1591 adev->gmc.xgmi.connected_to_cpu =
1592 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
1595 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
1596 enum amdgpu_pkg_type pkg_type =
1597 adev->smuio.funcs->get_pkg_type(adev);
1598 /* On a GFXIP 9.4.3 APU, there is no physical VRAM domain present
1599 * and the APU can be used in two possible modes:
1602 * "is_app_apu" can be used to identify the APU in the native
1605 adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
1606 !pci_resource_len(adev->pdev, 0));
1609 gmc_v9_0_set_gmc_funcs(adev);
1610 gmc_v9_0_set_irq_funcs(adev);
1611 gmc_v9_0_set_umc_funcs(adev);
1612 gmc_v9_0_set_mmhub_funcs(adev);
1613 gmc_v9_0_set_mmhub_ras_funcs(adev);
1614 gmc_v9_0_set_gfxhub_funcs(adev);
1615 gmc_v9_0_set_hdp_ras_funcs(adev);
1616 gmc_v9_0_set_mca_ras_funcs(adev);
1617 gmc_v9_0_set_xgmi_ras_funcs(adev);
1619 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1620 adev->gmc.shared_aperture_end =
1621 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1622 adev->gmc.private_aperture_start = 0x1000000000000000ULL;
1623 adev->gmc.private_aperture_end =
1624 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1629 static int gmc_v9_0_late_init(void *handle)
1631 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1634 r = amdgpu_gmc_allocate_vm_inv_eng(adev);
1639 * Work around a performance drop issue seen when the VBIOS enables partial
1640 * writes while disabling HBM ECC for vega10.
1642 if (!amdgpu_sriov_vf(adev) &&
1643 (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
1644 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
1645 if (adev->df.funcs &&
1646 adev->df.funcs->enable_ecc_force_par_wr_rmw)
1647 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
1651 if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
1652 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
1653 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
1654 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
1656 if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
1657 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
1658 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
1661 r = amdgpu_gmc_ras_late_init(adev);
1665 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
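/*
 * gmc_v9_0_vram_gtt_location - place VRAM, GART and AGP in the MC space
 *
 * Starts from the MMHUB frame-buffer location plus the XGMI node offset.
 * When the GPU is connected to the CPU over XGMI, the system VM layout is
 * used; otherwise the VRAM, GART and AGP apertures are placed explicitly.
 */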
1668 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1669 struct amdgpu_gmc *mc)
1671 u64 base = adev->mmhub.funcs->get_fb_location(adev);
1673 /* add the xgmi offset of the physical node */
1674 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1675 if (adev->gmc.xgmi.connected_to_cpu) {
1676 amdgpu_gmc_sysvm_location(adev, mc);
1678 amdgpu_gmc_vram_location(adev, mc, base);
1679 amdgpu_gmc_gart_location(adev, mc);
1680 amdgpu_gmc_agp_location(adev, mc);
1682 /* base offset of vram pages */
1683 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
1685 /* XXX: add the xgmi offset of the physical node? */
1686 adev->vm_manager.vram_base_offset +=
1687 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1691 * gmc_v9_0_mc_init - initialize the memory controller driver params
1693 * @adev: amdgpu_device pointer
1695 * Look up the amount of vram, vram width, and decide how to place
1696 * vram and gart within the GPU's physical address space.
1697 * Returns 0 for success.
1699 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
1703 /* get_memsize() returns the VRAM size in MB */
1704 if (!adev->gmc.is_app_apu) {
1705 adev->gmc.mc_vram_size =
1706 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
1708 DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
1709 adev->gmc.mc_vram_size = 0;
1711 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
1713 if (!(adev->flags & AMD_IS_APU) &&
1714 !adev->gmc.xgmi.connected_to_cpu) {
1715 r = amdgpu_device_resize_fb_bar(adev);
1719 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
1720 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
1722 #ifdef CONFIG_X86_64
1724 * An AMD Accelerated Processing Platform (APP) supporting the GPU-HOST xgmi
1725 * interface can use VRAM through here as it appears as system reserved
1726 * memory in the host address space.
1728 * For APUs, VRAM is just the stolen system memory and can be accessed
1731 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
1734 /* check whether both host-gpu and gpu-gpu xgmi links exist */
1735 if ((!amdgpu_sriov_vf(adev) &&
1736 (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
1737 (adev->gmc.xgmi.supported &&
1738 adev->gmc.xgmi.connected_to_cpu)) {
1739 adev->gmc.aper_base =
1740 adev->gfxhub.funcs->get_mc_fb_offset(adev) +
1741 adev->gmc.xgmi.physical_node_id *
1742 adev->gmc.xgmi.node_segment_size;
1743 adev->gmc.aper_size = adev->gmc.real_vram_size;
1747 adev->gmc.visible_vram_size = adev->gmc.aper_size;
1749 /* set the gart size */
1750 if (amdgpu_gart_size == -1) {
1751 switch (adev->ip_versions[GC_HWIP][0]) {
1752 case IP_VERSION(9, 0, 1): /* all engines support GPUVM */
1753 case IP_VERSION(9, 2, 1): /* all engines support GPUVM */
1754 case IP_VERSION(9, 4, 0):
1755 case IP_VERSION(9, 4, 1):
1756 case IP_VERSION(9, 4, 2):
1757 case IP_VERSION(9, 4, 3):
1759 adev->gmc.gart_size = 512ULL << 20;
1761 case IP_VERSION(9, 1, 0): /* DCE SG support */
1762 case IP_VERSION(9, 2, 2): /* DCE SG support */
1763 case IP_VERSION(9, 3, 0):
1764 adev->gmc.gart_size = 1024ULL << 20;
1768 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
1771 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
1773 gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
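/*
 * gmc_v9_0_gart_init - create the GART table
 *
 * Selects the VMID0 page-table depth and block size (one extra level with
 * PDB0 when connected to the CPU over XGMI), then allocates the GART table
 * in VRAM, or in system memory on devices without dedicated VRAM.
 */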
1778 static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1782 if (adev->gart.bo) {
1783 WARN(1, "VEGA10 PCIE GART already initialized\n");
1787 if (adev->gmc.xgmi.connected_to_cpu) {
1788 adev->gmc.vmid0_page_table_depth = 1;
1789 adev->gmc.vmid0_page_table_block_size = 12;
1791 adev->gmc.vmid0_page_table_depth = 0;
1792 adev->gmc.vmid0_page_table_block_size = 0;
1795 /* Initialize common gart structure */
1796 r = amdgpu_gart_init(adev);
1799 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
1800 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
1801 AMDGPU_PTE_EXECUTABLE;
1803 if (!adev->gmc.real_vram_size) {
1804 dev_info(adev->dev, "Put GART in system memory for APU\n");
1805 r = amdgpu_gart_table_ram_alloc(adev);
1807 dev_err(adev->dev, "Failed to allocate GART in system memory\n");
1809 r = amdgpu_gart_table_vram_alloc(adev);
1813 if (adev->gmc.xgmi.connected_to_cpu)
1814 r = amdgpu_gmc_pdb0_alloc(adev);
1821 * gmc_v9_0_save_registers - saves regs
1823 * @adev: amdgpu_device pointer
1825 * This saves register values that should be
1826 * restored upon resume
1828 static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1830 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
1831 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
1832 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
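/*
 * gmc_v9_0_validate_partition_info - sanity-check memory partitions
 *
 * Verifies that the number of memory ranges set up by the driver matches
 * the NPS mode reported by hardware: 1 for NPS1/unknown, 2 for NPS2, and
 * 3 or 4 for NPS4.
 */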
1835 static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
1837 enum amdgpu_memory_partition mode;
1841 mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
1843 /* The mode detected by hardware is not among the supported modes */
1844 if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
1845 !(BIT(mode - 1) & supp_modes))
1849 case UNKNOWN_MEMORY_PARTITION_MODE:
1850 case AMDGPU_NPS1_PARTITION_MODE:
1851 valid = (adev->gmc.num_mem_partitions == 1);
1853 case AMDGPU_NPS2_PARTITION_MODE:
1854 valid = (adev->gmc.num_mem_partitions == 2);
1856 case AMDGPU_NPS4_PARTITION_MODE:
1857 valid = (adev->gmc.num_mem_partitions == 3 ||
1858 adev->gmc.num_mem_partitions == 4);
1867 static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
1871 /* Check if node with id 'nid' is present in 'node_ids' array */
1872 for (i = 0; i < num_ids; ++i)
1873 if (node_ids[i] == nid)
1880 gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
1881 struct amdgpu_mem_partition_info *mem_ranges)
1883 int num_ranges = 0, ret, mem_groups;
1884 struct amdgpu_numa_info numa_info;
1885 int node_ids[MAX_MEM_RANGES];
1886 int num_xcc, xcc_id;
1889 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1890 xcc_mask = (1U << num_xcc) - 1;
1891 mem_groups = hweight32(adev->aid_mask);
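/* Query the ACPI memory affinity of each XCC and create one range per
 * distinct NUMA node.
 */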
1893 for_each_inst(xcc_id, xcc_mask) {
1894 ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
1898 if (numa_info.nid == NUMA_NO_NODE) {
1899 mem_ranges[0].size = numa_info.size;
1900 mem_ranges[0].numa.node = numa_info.nid;
1905 if (gmc_v9_0_is_node_present(node_ids, num_ranges,
1909 node_ids[num_ranges] = numa_info.nid;
1910 mem_ranges[num_ranges].numa.node = numa_info.nid;
1911 mem_ranges[num_ranges].size = numa_info.size;
1915 adev->gmc.num_mem_partitions = num_ranges;
1917 /* If there is only one partition, don't use the entire size */
1918 if (adev->gmc.num_mem_partitions == 1) {
1919 mem_ranges[0].size = mem_ranges[0].size * (mem_groups - 1);
1920 do_div(mem_ranges[0].size, mem_groups);
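/*
 * gmc_v9_0_init_sw_mem_ranges - split VRAM into partition ranges
 *
 * Divides real VRAM evenly into 1, 2, 3 or 4 ranges according to the NPS
 * partition mode, with any remainder folded into the last range.
 */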
1925 gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
1926 struct amdgpu_mem_partition_info *mem_ranges)
1928 enum amdgpu_memory_partition mode;
1929 u32 start_addr = 0, size;
1932 mode = gmc_v9_0_query_memory_partition(adev);
1935 case UNKNOWN_MEMORY_PARTITION_MODE:
1936 case AMDGPU_NPS1_PARTITION_MODE:
1937 adev->gmc.num_mem_partitions = 1;
1939 case AMDGPU_NPS2_PARTITION_MODE:
1940 adev->gmc.num_mem_partitions = 2;
1942 case AMDGPU_NPS4_PARTITION_MODE:
1943 if (adev->flags & AMD_IS_APU)
1944 adev->gmc.num_mem_partitions = 3;
1946 adev->gmc.num_mem_partitions = 4;
1949 adev->gmc.num_mem_partitions = 1;
1953 size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT;
1954 size /= adev->gmc.num_mem_partitions;
1956 for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
1957 mem_ranges[i].range.fpfn = start_addr;
1958 mem_ranges[i].size = ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
1959 mem_ranges[i].range.lpfn = start_addr + size - 1;
1963 /* Adjust the last one */
1964 mem_ranges[adev->gmc.num_mem_partitions - 1].range.lpfn =
1965 (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
1966 mem_ranges[adev->gmc.num_mem_partitions - 1].size =
1967 adev->gmc.real_vram_size -
1968 ((u64)mem_ranges[adev->gmc.num_mem_partitions - 1].range.fpfn
1969 << AMDGPU_GPU_PAGE_SHIFT);
1972 static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
1976 adev->gmc.mem_partitions = kzalloc(
1977 MAX_MEM_RANGES * sizeof(struct amdgpu_mem_partition_info),
1980 if (!adev->gmc.mem_partitions)
1983 /* TODO: Get the range from PSP/Discovery for dGPU */
1984 if (adev->gmc.is_app_apu)
1985 gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
1987 gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
1989 if (amdgpu_sriov_vf(adev))
1992 valid = gmc_v9_0_validate_partition_info(adev);
1994 /* TODO: handle invalid case */
1996 "Mem ranges not matching with hardware config");
2002 static int gmc_v9_0_sw_init(void *handle)
2004 int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
2005 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2006 unsigned long inst_mask = adev->aid_mask;
2008 adev->gfxhub.funcs->init(adev);
2010 adev->mmhub.funcs->init(adev);
2012 spin_lock_init(&adev->gmc.invalidate_lock);
2014 if (!(adev->bios) || adev->gmc.is_app_apu) {
2015 if (adev->flags & AMD_IS_APU) {
2016 if (adev->gmc.is_app_apu) {
2017 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
2018 adev->gmc.vram_width = 128 * 64;
2020 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
2021 adev->gmc.vram_width = 64 * 64;
2024 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
2025 adev->gmc.vram_width = 128 * 64;
2028 r = amdgpu_atomfirmware_get_vram_info(adev,
2029 &vram_width, &vram_type, &vram_vendor);
2030 if (amdgpu_sriov_vf(adev))
2031 /* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN,
2032 * and the DF related registers are not readable; hardcoding seems to be
2033 * the only way to set the correct vram_width
2035 adev->gmc.vram_width = 2048;
2036 else if (amdgpu_emu_mode != 1)
2037 adev->gmc.vram_width = vram_width;
2039 if (!adev->gmc.vram_width) {
2040 int chansize, numchan;
2042 /* hbm memory channel size */
2043 if (adev->flags & AMD_IS_APU)
2047 if (adev->df.funcs &&
2048 adev->df.funcs->get_hbm_channel_number) {
2049 numchan = adev->df.funcs->get_hbm_channel_number(adev);
2050 adev->gmc.vram_width = numchan * chansize;
2054 adev->gmc.vram_type = vram_type;
2055 adev->gmc.vram_vendor = vram_vendor;
2057 switch (adev->ip_versions[GC_HWIP][0]) {
2058 case IP_VERSION(9, 1, 0):
2059 case IP_VERSION(9, 2, 2):
2060 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2061 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2063 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
2064 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2066 /* vm_size is 128TB + 512GB for legacy 3-level page support */
2067 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
2068 adev->gmc.translate_further =
2069 adev->vm_manager.num_level > 1;
2072 case IP_VERSION(9, 0, 1):
2073 case IP_VERSION(9, 2, 1):
2074 case IP_VERSION(9, 4, 0):
2075 case IP_VERSION(9, 3, 0):
2076 case IP_VERSION(9, 4, 2):
2077 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2078 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2081 * To fulfill 4-level page support,
2082 * vm size is 256TB (48 bit), the maximum size for Vega10,
2083 * block size 512 (9bit)
2085 /* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
2086 if (amdgpu_sriov_vf(adev))
2087 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
2089 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2090 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
2091 adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2093 case IP_VERSION(9, 4, 1):
2094 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
2095 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
2096 set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);
2098 /* Keep the vm size the same as Vega20 */
2099 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2100 adev->gmc.translate_further = adev->vm_manager.num_level > 1;
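/* GC 9.4.3 has one GFX hub per XCC and one MM hub per AID, so the hub
 * masks are derived from the XCC and AID masks.
 */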
2102 case IP_VERSION(9, 4, 3):
2103 bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
2104 NUM_XCC(adev->gfx.xcc_mask));
2106 inst_mask <<= AMDGPU_MMHUB0(0);
2107 bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);
2109 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2110 adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2116 /* This interrupt is the VMC page fault. */
2117 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
2118 &adev->gmc.vm_fault);
2122 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
2123 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
2124 &adev->gmc.vm_fault);
2129 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
2130 &adev->gmc.vm_fault);
2135 if (!amdgpu_sriov_vf(adev) &&
2136 !adev->gmc.xgmi.connected_to_cpu) {
2137 /* interrupt sent to DF. */
2138 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
2139 &adev->gmc.ecc_irq);
2144 /* Set the internal MC address mask
2145 * This is the max address of the GPU's
2146 * internal address space.
2148 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
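/* GC 9.4.2 and newer can use 48-bit DMA addressing; older parts are
 * limited to 44 bits.
 */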
2150 dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44;
2151 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
2153 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
2156 adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
2158 r = gmc_v9_0_mc_init(adev);
2162 amdgpu_gmc_get_vbios_allocations(adev);
2164 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
2165 r = gmc_v9_0_init_mem_ranges(adev);
2170 /* Memory manager */
2171 r = amdgpu_bo_init(adev);
2175 r = gmc_v9_0_gart_init(adev);
2181 * VMID 0 is reserved for System
2182 * amdgpu graphics/compute will use VMIDs 1..n-1
2183 * amdkfd will use VMIDs n..15
2185 * The first KFD VMID is 8 for GPUs with graphics, 3 for
2186 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
2187 * for video processing.
2189 adev->vm_manager.first_kfd_vmid =
2190 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
2191 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
2192 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8;
2194 amdgpu_vm_manager_init(adev);
2196 gmc_v9_0_save_registers(adev);
2198 r = amdgpu_gmc_ras_sw_init(adev);
2202 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
2203 amdgpu_gmc_sysfs_init(adev);
2208 static int gmc_v9_0_sw_fini(void *handle)
2210 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2212 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
2213 amdgpu_gmc_sysfs_fini(adev);
2214 adev->gmc.num_mem_partitions = 0;
2215 kfree(adev->gmc.mem_partitions);
2217 amdgpu_gmc_ras_fini(adev);
2218 amdgpu_gem_force_release(adev);
2219 amdgpu_vm_manager_fini(adev);
2220 if (!adev->gmc.real_vram_size) {
2221 dev_info(adev->dev, "Put GART in system memory for APU free\n");
2222 amdgpu_gart_table_ram_free(adev);
2224 amdgpu_gart_table_vram_free(adev);
2226 amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
2227 amdgpu_bo_fini(adev);
2232 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
2235 switch (adev->ip_versions[MMHUB_HWIP][0]) {
2236 case IP_VERSION(9, 0, 0):
2237 if (amdgpu_sriov_vf(adev))
2240 case IP_VERSION(9, 4, 0):
2241 soc15_program_register_sequence(adev,
2242 golden_settings_mmhub_1_0_0,
2243 ARRAY_SIZE(golden_settings_mmhub_1_0_0));
2244 soc15_program_register_sequence(adev,
2245 golden_settings_athub_1_0_0,
2246 ARRAY_SIZE(golden_settings_athub_1_0_0));
2248 case IP_VERSION(9, 1, 0):
2249 case IP_VERSION(9, 2, 0):
2250 /* TODO for renoir */
2251 soc15_program_register_sequence(adev,
2252 golden_settings_athub_1_0_0,
2253 ARRAY_SIZE(golden_settings_athub_1_0_0));
2261 * gmc_v9_0_restore_registers - restores regs
2263 * @adev: amdgpu_device pointer
2265 * This restores register values, saved at suspend.
2267 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
2269 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
2270 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
2271 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
2272 WARN_ON(adev->gmc.sdpif_register !=
2273 RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
2278 * gmc_v9_0_gart_enable - gart enable
2280 * @adev: amdgpu_device pointer
2282 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
2286 if (adev->gmc.xgmi.connected_to_cpu)
2287 amdgpu_gmc_init_pdb0(adev);
2289 if (adev->gart.bo == NULL) {
2290 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
2294 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
2296 if (!adev->in_s0ix) {
2297 r = adev->gfxhub.funcs->gart_enable(adev);
2302 r = adev->mmhub.funcs->gart_enable(adev);
2306 DRM_INFO("PCIE GART of %uM enabled.\n",
2307 (unsigned)(adev->gmc.gart_size >> 20));
2308 if (adev->gmc.pdb0_bo)
2309 DRM_INFO("PDB0 located at 0x%016llX\n",
2310 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
2311 DRM_INFO("PTB located at 0x%016llX\n",
2312 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
2317 static int gmc_v9_0_hw_init(void *handle)
2319 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2323 /* The sequence of these two function calls matters. */
2324 gmc_v9_0_init_golden_registers(adev);
2326 if (adev->mode_info.num_crtc) {
2327 /* Lock out access through the VGA aperture */
2328 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
2329 /* disable VGA render */
2330 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
2333 if (adev->mmhub.funcs->update_power_gating)
2334 adev->mmhub.funcs->update_power_gating(adev, true);
2336 adev->hdp.funcs->init_registers(adev);
2338 /* After HDP is initialized, flush HDP.*/
2339 adev->hdp.funcs->flush_hdp(adev, NULL);
2341 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
2346 if (!amdgpu_sriov_vf(adev)) {
2348 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2349 adev->mmhub.funcs->set_fault_enable_default(adev, value);
2351 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2352 if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
2354 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
2357 if (adev->umc.funcs && adev->umc.funcs->init_registers)
2358 adev->umc.funcs->init_registers(adev);
2360 r = gmc_v9_0_gart_enable(adev);
2364 if (amdgpu_emu_mode == 1)
2365 return amdgpu_gmc_vram_checking(adev);
2371 * gmc_v9_0_gart_disable - gart disable
2373 * @adev: amdgpu_device pointer
2375 * This disables all VM page table.
2377 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
2380 adev->gfxhub.funcs->gart_disable(adev);
2381 adev->mmhub.funcs->gart_disable(adev);
2384 static int gmc_v9_0_hw_fini(void *handle)
2386 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2388 gmc_v9_0_gart_disable(adev);
2390 if (amdgpu_sriov_vf(adev)) {
2391 /* full access mode, so don't touch any GMC register */
2392 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
2397 * Pair the operations done in gmc_v9_0_hw_init and thus maintain
2398 * a correct cached state for GMC. Otherwise, the "gate" again
2399 * operation on S3 resume will fail due to a wrong cached state.
2401 if (adev->mmhub.funcs->update_power_gating)
2402 adev->mmhub.funcs->update_power_gating(adev, false);
2404 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
2409 static int gmc_v9_0_suspend(void *handle)
2411 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2413 return gmc_v9_0_hw_fini(adev);
2416 static int gmc_v9_0_resume(void *handle)
2419 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2421 r = gmc_v9_0_hw_init(adev);
2425 amdgpu_vmid_reset_all(adev);
2430 static bool gmc_v9_0_is_idle(void *handle)
2432 /* MC is always ready in GMC v9.*/
2436 static int gmc_v9_0_wait_for_idle(void *handle)
2438 /* There is no need to wait for MC idle in GMC v9.*/
2442 static int gmc_v9_0_soft_reset(void *handle)
2444 /* XXX for emulation.*/
2448 static int gmc_v9_0_set_clockgating_state(void *handle,
2449 enum amd_clockgating_state state)
2451 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2453 adev->mmhub.funcs->set_clockgating(adev, state);
2455 athub_v1_0_set_clockgating(adev, state);
2460 static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
2462 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2464 adev->mmhub.funcs->get_clockgating(adev, flags);
2466 athub_v1_0_get_clockgating(adev, flags);
2469 static int gmc_v9_0_set_powergating_state(void *handle,
2470 enum amd_powergating_state state)
2475 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
2477 .early_init = gmc_v9_0_early_init,
2478 .late_init = gmc_v9_0_late_init,
2479 .sw_init = gmc_v9_0_sw_init,
2480 .sw_fini = gmc_v9_0_sw_fini,
2481 .hw_init = gmc_v9_0_hw_init,
2482 .hw_fini = gmc_v9_0_hw_fini,
2483 .suspend = gmc_v9_0_suspend,
2484 .resume = gmc_v9_0_resume,
2485 .is_idle = gmc_v9_0_is_idle,
2486 .wait_for_idle = gmc_v9_0_wait_for_idle,
2487 .soft_reset = gmc_v9_0_soft_reset,
2488 .set_clockgating_state = gmc_v9_0_set_clockgating_state,
2489 .set_powergating_state = gmc_v9_0_set_powergating_state,
2490 .get_clockgating_state = gmc_v9_0_get_clockgating_state,
2493 const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
2495 .type = AMD_IP_BLOCK_TYPE_GMC,
2499 .funcs = &gmc_v9_0_ip_funcs,