/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
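
/*
 * Illustrative example: with the masks/shifts above (width in bits 13:0,
 * height in bits 29:16), a 1920x1080 primary viewport reads back from
 * HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION as (1080 << 16) | 1920 = 0x04380780:
 *
 *	u32 v = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
 *	u32 w = REG_GET_FIELD(v, HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION,
 *			      PRI_VIEWPORT_WIDTH);
 *	u32 h = REG_GET_FIELD(v, HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION,
 *			      PRI_VIEWPORT_HEIGHT);
 */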

static const char *gfxhub_client_ids[] = {

static const char *mmhub_client_ids_raven[][2] = {

static const char *mmhub_client_ids_renoir[][2] = {

static const char *mmhub_client_ids_vega10[][2] = {
	[32+14][0] = "SDMA0",
	[32+4][1] = "DCEDWB",
	[32+14][1] = "SDMA1",

static const char *mmhub_client_ids_vega12[][2] = {
	[32+15][0] = "SDMA0",
	[32+1][1] = "DCEDWB",
	[32+15][1] = "SDMA1",

static const char *mmhub_client_ids_vega20[][2] = {
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[32+14][1] = "SDMA1",

static const char *mmhub_client_ids_arcturus[][2] = {

static const char *mmhub_client_ids_aldebaran[][2] = {
	[32+1][0] = "DBGU_IO0",
	[32+2][0] = "DBGU_IO2",
	[96+11][0] = "JPEG0",
	[96+13][0] = "VCNU0",
	[128+11][0] = "JPEG1",
	[128+12][0] = "VCN1",
	[128+13][0] = "VCNU1",
	[256+0][0] = "SDMA0",
	[256+1][0] = "SDMA1",
	[256+2][0] = "SDMA2",
	[256+3][0] = "SDMA3",
	[256+4][0] = "SDMA4",
	[32+1][1] = "DBGU_IO0",
	[32+2][1] = "DBGU_IO2",
	[96+11][1] = "JPEG0",
	[96+13][1] = "VCNU0",
	[128+11][1] = "JPEG1",
	[128+12][1] = "VCN1",
	[128+13][1] = "VCNU1",
	[256+0][1] = "SDMA0",
	[256+1][1] = "SDMA1",
	[256+2][1] = "SDMA2",
	[256+3][1] = "SDMA3",
	[256+4][1] = "SDMA4",
};
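
/*
 * The mmhub_client_ids_* tables above are indexed as [cid][rw]: cid is the
 * client ID reported in VM_L2_PROTECTION_FAULT_STATUS and rw selects the
 * read (0) or write (1) client name, since some client IDs decode to
 * different units depending on the access direction.
 */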

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
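
/**
 * gmc_v9_0_ecc_interrupt_state - enable/disable the UMC ECC interrupt
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type
 * @state: interrupt state to program
 *
 * Walks the MCUMC control and mask register lists above and updates the
 * ECC interrupt enables to match @state.
 */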
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	 * sequences performed by PSP BL */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}
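
/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type
 * @state: interrupt state to program
 *
 * Sets or clears the protection-fault-enable bits in the VM_CONTEXT*_CNTL
 * registers of all 16 VM contexts on every VM hub.
 */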
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}
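
/**
 * gmc_v9_0_process_interrupt - handle a VM protection fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Filters retry faults and tries to recover them by filling in the missing
 * page tables; otherwise decodes the fault address and the
 * VM_L2_PROTECTION_FAULT_STATUS register and reports the fault.
 */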
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	uint32_t status = 0, cid = 0, rw = 0;
	struct amdgpu_task_info task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!printk_ratelimit())
		return 0;

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		hub_name = "gfxhub0";
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
		"pasid:%u, for process %s pid %d thread %s pid %d)\n",
		hub_name, retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
	    (adev->asic_type < CHIP_ALDEBARAN))
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
			break;
		case CHIP_VEGA12:
			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
			break;
		case CHIP_VEGA20:
			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
			break;
		case CHIP_ARCTURUS:
			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
			break;
		case CHIP_RAVEN:
			mmhub_cid = mmhub_client_ids_raven[cid][rw];
			break;
		case CHIP_RENOIR:
			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
			break;
		case CHIP_ALDEBARAN:
			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
			break;
		default:
			mmhub_cid = NULL;
			break;
		}
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}
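
/*
 * gmc_v9_0_get_invalidate_req below builds the VM_INVALIDATE_ENG*_REQ value
 * used by the TLB flush paths: it selects the VMID to invalidate, the flush
 * type, and which cached levels (L2 PTEs/PDEs and L1 PTEs) to drop.
 */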
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					      uint32_t vmhub)
{
	if (adev->asic_type == CHIP_ALDEBARAN)
		return false;

	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}
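
/*
 * gmc_v9_0_get_atc_vmid_pasid_mapping_info below reads the ATHUB
 * VMID<->PASID mapping register for a VMID and returns true, with the PASID
 * in *p_pasid, when the mapping is marked valid.
 */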
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
						     uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * VMID 0 is used for the physical GPU addresses mapped by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using the given flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	const unsigned eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->asic_type == CHIP_VEGA20) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
		inv_req2 = 0;
	}

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_sem)) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		up_read(&adev->reset_sem);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * It may lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle; add a semaphore acquire before invalidation
	 * and a semaphore release after invalidation to avoid entering a
	 * power-gated state in between.
	 */

	/* TODO: semaphore handling for GFXHUB still needs debugging */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	do {
		WREG32_NO_KIQ(hub->vm_inv_eng0_req +
			      hub->eng_distance * eng, inv_req);

		/*
		 * Issue a dummy read to wait for the ACK register to
		 * be cleared to avoid a false ACK due to the new fast
		 * GRBM interface.
		 */
		if ((vmhub == AMDGPU_GFXHUB_0) &&
		    (adev->asic_type < CHIP_ALDEBARAN))
			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
				      hub->eng_distance * eng);

		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		inv_req = inv_req2;
		inv_req2 = 0;
	} while (inv_req);

	/* TODO: semaphore handling for GFXHUB still needs debugging */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * writing 0 releases the semaphore
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_in_reset(adev))
		return -EIO;

	if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->asic_type == CHIP_VEGA20);
		/* 2 dwords flush + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, ndw);
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			up_read(&adev->reset_sem);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			up_read(&adev->reset_sem);
			return -ETIME;
		}

		up_read(&adev->reset_sem);
		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
							       &queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							       i, flush_type);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						       AMDGPU_GFXHUB_0, flush_type);
			}
		}
	}

	return 0;
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * It may lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle; add a semaphore acquire before invalidation
	 * and a semaphore release after invalidation to avoid entering a
	 * power-gated state in between.
	 */

	/* TODO: semaphore handling for GFXHUB still needs debugging */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: semaphore handling for GFXHUB still needs debugging */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * writing 0 releases the semaphore
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}
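
/*
 * gmc_v9_0_emit_pasid_mapping below writes the VMID->PASID mapping into the
 * IH VMID LUT registers on the ring, so the interrupt handler can translate
 * the VMID of a fault back to a PASID.
 */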
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no LUT register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 47:12 4k physical page base address
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 47:6 physical base address of PD or PTE
 */

static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
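
/*
 * Note on AMDGPU_PDE_BFS(0x9) above: a block fragment size of 9 tells the
 * walker that each entry in the next-level table covers 2^9 contiguous 4K
 * pages, i.e. 2MB, which is what the "translate further" layout relies on.
 */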

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if ((adev->asic_type == CHIP_ARCTURUS ||
	     adev->asic_type == CHIP_ALDEBARAN) &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;

	if (adev->asic_type == CHIP_ALDEBARAN)
		*flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
		break;
	case CHIP_ALDEBARAN:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
		if (1 & adev->smuio.funcs->get_die_id(adev))
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	case CHIP_ALDEBARAN:
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}
}

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
		break;
	case CHIP_ALDEBARAN:
		adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
	adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs;
}

static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_ALDEBARAN:
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->mca.funcs = &mca_v3_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->asic_type == CHIP_ALDEBARAN) {
		adev->gmc.xgmi.supported = true;
		adev->gmc.xgmi.connected_to_cpu =
			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
	}

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_mmhub_ras_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);
	gmc_v9_0_set_hdp_ras_funcs(adev);
	gmc_v9_0_set_mca_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
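	/*
	 * Note: the shared and private apertures above are each 4GB
	 * (4ULL << 30) windows in the 64-bit GPUVM address space.
	 */

	return 0;
}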

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Workaround for a performance drop when the VBIOS enables partial
	 * writes while disabling HBM ECC on vega10.
	 */
	if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		if (adev->mmhub.ras_funcs &&
		    adev->mmhub.ras_funcs->reset_ras_error_count)
			adev->mmhub.ras_funcs->reset_ras_error_count(adev);

		if (adev->hdp.ras_funcs &&
		    adev->hdp.ras_funcs->reset_ras_error_count)
			adev->hdp.ras_funcs->reset_ras_error_count(adev);
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	if (adev->gmc.xgmi.connected_to_cpu) {
		amdgpu_gmc_sysvm_location(adev, mc);
	} else {
		amdgpu_gmc_vram_location(adev, mc, base);
		amdgpu_gmc_gart_location(adev, mc);
		amdgpu_gmc_agp_location(adev, mc);
	}

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB; convert it to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
	 * interface can use VRAM through here, as it appears as system
	 * reserved memory in the host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be accessed
	 * directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through the PCIe BAR.
	 */
	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if ((adev->flags & AMD_IS_APU) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

	if (adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.vmid0_page_table_depth = 1;
		adev->gmc.vmid0_page_table_block_size = 12;
	} else {
		adev->gmc.vmid0_page_table_depth = 0;
		adev->gmc.vmid0_page_table_block_size = 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	r = amdgpu_gart_table_vram_alloc(adev);
	if (r)
		return r;

	if (adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_gmc_pdb0_alloc(adev);
	}

	return r;
}

/**
 * gmc_v9_0_save_registers - saves registers
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume.
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);
	if (adev->mca.funcs)
		adev->mca.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as it
		 * is on RAVEN, and the DF related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df.funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}
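	/*
	 * For example (illustrative numbers): eight HBM channels of 128 bits
	 * each would yield adev->gmc.vram_width = 1024 here.
	 */
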
	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
	case CHIP_ALDEBARAN:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48 bit), maximum size of Vega10,
		 * block size 512 (9 bit)
		 */
		/* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size the same as Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is for VMC page faults. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
				      &adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		(adev->asic_type == CHIP_ARCTURUS ||
		 adev->asic_type == CHIP_ALDEBARAN) ? 3 : 8;

	amdgpu_vm_manager_init(adev);

	gmc_v9_0_save_registers(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		fallthrough;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_restore_registers - restores registers
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN) {
		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
		WARN_ON(adev->gmc.sdpif_register !=
			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gmc.xgmi.connected_to_cpu)
		amdgpu_gmc_init_pdb0(adev);

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled.\n",
		 (unsigned)(adev->gmc.gart_size >> 20));
	if (adev->gmc.pdb0_bo)
		DRM_INFO("PDB0 located at 0x%016llX\n",
			 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
	DRM_INFO("PTB located at 0x%016llX\n",
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int i, r;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lock out access through the VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	adev->hdp.funcs->init_registers(adev);

	/* After HDP is initialized, flush HDP. */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.funcs = &gmc_v9_0_ip_funcs,
};