 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
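
/*
 * Human-readable names for the client IDs reported in
 * VM_L2_PROTECTION_FAULT_STATUS. gfxhub_client_ids is indexed by the CID
 * field; the per-ASIC mmhub tables are indexed by [cid][rw], since some
 * MMHUB client IDs decode differently for reads (rw == 0) and writes
 * (rw == 1). These tables are only used to pretty-print page faults.
 */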
static const char *gfxhub_client_ids[] = {

static const char *mmhub_client_ids_raven[][2] = {

static const char *mmhub_client_ids_renoir[][2] = {

static const char *mmhub_client_ids_vega10[][2] = {
        [32+14][0] = "SDMA0",
        [32+4][1] = "DCEDWB",
        [32+14][1] = "SDMA1",

static const char *mmhub_client_ids_vega12[][2] = {
        [32+15][0] = "SDMA0",
        [32+1][1] = "DCEDWB",
        [32+15][1] = "SDMA1",

static const char *mmhub_client_ids_vega20[][2] = {
        [32+12][0] = "UTCL2",
        [32+14][0] = "SDMA1",
        [32+14][1] = "SDMA1",

static const char *mmhub_client_ids_arcturus[][2] = {

static const char *mmhub_client_ids_aldebaran[][2] = {
        [32+1][0] = "DBGU_IO0",
        [32+2][0] = "DBGU_IO2",
        [96+11][0] = "JPEG0",
        [96+13][0] = "VCNU0",
        [128+11][0] = "JPEG1",
        [128+12][0] = "VCN1",
        [128+13][0] = "VCNU1",
        [256+0][0] = "SDMA0",
        [256+1][0] = "SDMA1",
        [256+2][0] = "SDMA2",
        [256+3][0] = "SDMA3",
        [256+4][0] = "SDMA4",
        [32+1][1] = "DBGU_IO0",
        [32+2][1] = "DBGU_IO2",
        [96+11][1] = "JPEG0",
        [96+13][1] = "VCNU0",
        [128+11][1] = "JPEG1",
        [128+12][1] = "VCN1",
        [128+13][1] = "VCNU1",
        [256+0][1] = "SDMA0",
        [256+1][1] = "SDMA1",
        [256+2][1] = "SDMA2",
        [256+3][1] = "SDMA3",
        [256+4][1] = "SDMA4",

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
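
/*
 * MMIO offsets of the per-channel UMC ECC control registers walked by
 * gmc_v9_0_ecc_interrupt_state() below: eight UMC instances with four
 * channels each, channels spaced 0x800 apart.
 */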
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
        (0x000143c0 + 0x00000000),
        (0x000143c0 + 0x00000800),
        (0x000143c0 + 0x00001000),
        (0x000143c0 + 0x00001800),
        (0x000543c0 + 0x00000000),
        (0x000543c0 + 0x00000800),
        (0x000543c0 + 0x00001000),
        (0x000543c0 + 0x00001800),
        (0x000943c0 + 0x00000000),
        (0x000943c0 + 0x00000800),
        (0x000943c0 + 0x00001000),
        (0x000943c0 + 0x00001800),
        (0x000d43c0 + 0x00000000),
        (0x000d43c0 + 0x00000800),
        (0x000d43c0 + 0x00001000),
        (0x000d43c0 + 0x00001800),
        (0x001143c0 + 0x00000000),
        (0x001143c0 + 0x00000800),
        (0x001143c0 + 0x00001000),
        (0x001143c0 + 0x00001800),
        (0x001543c0 + 0x00000000),
        (0x001543c0 + 0x00000800),
        (0x001543c0 + 0x00001000),
        (0x001543c0 + 0x00001800),
        (0x001943c0 + 0x00000000),
        (0x001943c0 + 0x00000800),
        (0x001943c0 + 0x00001000),
        (0x001943c0 + 0x00001800),
        (0x001d43c0 + 0x00000000),
        (0x001d43c0 + 0x00000800),
        (0x001d43c0 + 0x00001000),
        (0x001d43c0 + 0x00001800),
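
/*
 * Matching mask registers, each at a fixed 0x20 offset from its control
 * register above.
 */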
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
        (0x000143e0 + 0x00000000),
        (0x000143e0 + 0x00000800),
        (0x000143e0 + 0x00001000),
        (0x000143e0 + 0x00001800),
        (0x000543e0 + 0x00000000),
        (0x000543e0 + 0x00000800),
        (0x000543e0 + 0x00001000),
        (0x000543e0 + 0x00001800),
        (0x000943e0 + 0x00000000),
        (0x000943e0 + 0x00000800),
        (0x000943e0 + 0x00001000),
        (0x000943e0 + 0x00001800),
        (0x000d43e0 + 0x00000000),
        (0x000d43e0 + 0x00000800),
        (0x000d43e0 + 0x00001000),
        (0x000d43e0 + 0x00001800),
        (0x001143e0 + 0x00000000),
        (0x001143e0 + 0x00000800),
        (0x001143e0 + 0x00001000),
        (0x001143e0 + 0x00001800),
        (0x001543e0 + 0x00000000),
        (0x001543e0 + 0x00000800),
        (0x001543e0 + 0x00001000),
        (0x001543e0 + 0x00001800),
        (0x001943e0 + 0x00000000),
        (0x001943e0 + 0x00000800),
        (0x001943e0 + 0x00001000),
        (0x001943e0 + 0x00001800),
        (0x001d43e0 + 0x00000000),
        (0x001d43e0 + 0x00000800),
        (0x001d43e0 + 0x00001000),
        (0x001d43e0 + 0x00001800),
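
/*
 * Enable or disable UMC ECC error interrupts by updating every channel's
 * control and mask register listed above. ASICs newer than VEGA10/12 rely
 * on the PSP bootloader for this programming, so the handler bails out
 * early for them.
 */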
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        enum amdgpu_interrupt_state state)
        u32 bits, i, tmp, reg;

        /* Devices newer than VEGA10/12 shall have these programming
         * sequences performed by PSP BL */
        if (adev->asic_type >= CHIP_VEGA20)
        case AMDGPU_IRQ_STATE_DISABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
        case AMDGPU_IRQ_STATE_ENABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             enum amdgpu_interrupt_state state)
        struct amdgpu_vmhub *hub;
        u32 tmp, reg, bits, i, j;

        bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

        case AMDGPU_IRQ_STATE_DISABLE:
                for (j = 0; j < adev->num_vmhubs; j++) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;
        case AMDGPU_IRQ_STATE_ENABLE:
                for (j = 0; j < adev->num_vmhubs; j++) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
        bool retry_fault = !!(entry->src_data[1] & 0x80);
        uint32_t status = 0, cid = 0, rw = 0;
        struct amdgpu_task_info task_info;
        struct amdgpu_vmhub *hub;
        const char *mmhub_cid;
        const char *hub_name;

        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        /* Returning 1 here also prevents sending the IV to the KFD */

        /* Process it only if it's the first fault for this address */
        if (entry->ih != &adev->irq.ih_soft &&
            amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
                                     entry->timestamp))

        /* Delegate it to a different ring if the hardware hasn't
         * already processed it.
         */
        if (entry->ih == &adev->irq.ih) {
                amdgpu_irq_delegate(adev, entry, 8);

        /* Try to handle the recoverable page faults by filling page
         * tables.
         */
        if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))

        if (!printk_ratelimit())

        if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
                hub = &adev->vmhub[AMDGPU_MMHUB_0];
        } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
                hub = &adev->vmhub[AMDGPU_MMHUB_1];
                hub_name = "gfxhub0";
                hub = &adev->vmhub[AMDGPU_GFXHUB_0];

        memset(&task_info, 0, sizeof(struct amdgpu_task_info));
        amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

        dev_err(adev->dev,
                "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
                "pasid:%u, for process %s pid %d thread %s pid %d)\n",
                hub_name, retry_fault ? "retry" : "no-retry",
                entry->src_id, entry->ring_id, entry->vmid,
                entry->pasid, task_info.process_name, task_info.tgid,
                task_info.task_name, task_info.pid);
        dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
                addr, entry->client_id,
                soc15_ih_clientid_name[entry->client_id]);

        if (amdgpu_sriov_vf(adev))

        /*
         * Issue a dummy read to wait for the status register to
         * be updated to avoid reading an incorrect value due to
         * the new fast GRBM interface.
         */
        if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
            (adev->asic_type < CHIP_ALDEBARAN))
                RREG32(hub->vm_l2_pro_fault_status);

        status = RREG32(hub->vm_l2_pro_fault_status);
        cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
        rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
        WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
588 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
590 if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
591 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
592 cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
593 gfxhub_client_ids[cid],
596 switch (adev->asic_type) {
598 mmhub_cid = mmhub_client_ids_vega10[cid][rw];
601 mmhub_cid = mmhub_client_ids_vega12[cid][rw];
604 mmhub_cid = mmhub_client_ids_vega20[cid][rw];
607 mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
610 mmhub_cid = mmhub_client_ids_raven[cid][rw];
613 mmhub_cid = mmhub_client_ids_renoir[cid][rw];
616 mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
622 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
623 mmhub_cid ? mmhub_cid : "unknown", cid);
625 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
626 REG_GET_FIELD(status,
627 VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
628 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
629 REG_GET_FIELD(status,
630 VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
631 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
632 REG_GET_FIELD(status,
633 VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
634 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
635 REG_GET_FIELD(status,
636 VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
637 dev_err(adev->dev, "\t RW: 0x%x\n", rw);

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
        .set = gmc_v9_0_vm_fault_interrupt_state,
        .process = gmc_v9_0_process_interrupt,

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
        .set = gmc_v9_0_ecc_interrupt_state,
        .process = amdgpu_umc_process_ecc_irq,

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

        if (!amdgpu_sriov_vf(adev) &&
            !adev->gmc.xgmi.connected_to_cpu) {
                adev->gmc.ecc_irq.num_types = 1;
                adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
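
/*
 * Build the VM_INVALIDATE_ENG0_REQ value for a per-VMID invalidation that
 * flushes the L1 PTEs and the L2 PTE/PDE0/PDE1/PDE2 caches without
 * clearing the protection fault status and address registers.
 */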
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
                                            uint32_t flush_type)
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            PER_VMID_INVALIDATE_REQ, 1 << vmid);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

/**
 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
                                              uint32_t vmhub)
        if (adev->asic_type == CHIP_ALDEBARAN)

        return ((vmhub == AMDGPU_MMHUB_0 ||
                 vmhub == AMDGPU_MMHUB_1) &&
                (!amdgpu_sriov_vf(adev)) &&
                (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
                   (adev->apu_flags & AMD_APU_IS_PICASSO))));
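
/*
 * Look up the PASID that the ATC currently maps to @vmid. Returns true
 * and stores the PASID in *p_pasid when the mapping is marked valid.
 */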
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
                                                     uint8_t vmid, uint16_t *p_pasid)
        value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
                     + vmid);
        *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);

/*
 * VMID 0 is used by the kernel for physical GPU addresses.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with a given type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using the given flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                   uint32_t vmhub, uint32_t flush_type)
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
        const unsigned eng = 17;
        u32 j, inv_req, inv_req2, tmp;
        struct amdgpu_vmhub *hub;

        BUG_ON(vmhub >= adev->num_vmhubs);

        hub = &adev->vmhub[vmhub];
        if (adev->gmc.xgmi.num_physical_nodes &&
            adev->asic_type == CHIP_VEGA20) {
                /* Vega20+XGMI caches PTEs in TC and TLB. Add a
                 * heavy-weight TLB flush (type 2), which flushes
                 * both. Due to a race condition with concurrent
                 * memory accesses using the same TLB cache line, we
                 * still need a second TLB flush after this.
                 */
                inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
                inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
                inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);

        /* This is necessary for a HW workaround under SRIOV as well
         * as GFXOFF under bare metal
         */
        if (adev->gfx.kiq.ring.sched.ready &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
            down_read_trylock(&adev->reset_sem)) {
                uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
                                                   1 << vmid);
                up_read(&adev->reset_sem);

        spin_lock(&adev->gmc.invalidate_lock);

        /*
         * The GPU may lose the gpuvm invalidate acknowledge state across
         * power-gating off cycles, so add a semaphore acquire before the
         * invalidation and a semaphore release after it to avoid entering
         * a power-gated state in between.
         */

        /* TODO: Semaphore handling for GFXHUB still needs to be debugged. */
                for (j = 0; j < adev->usec_timeout; j++) {
                        /* a read return value of 1 means semaphore acquired */
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                                            hub->eng_distance * eng);

                if (j >= adev->usec_timeout)
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");

        WREG32_NO_KIQ(hub->vm_inv_eng0_req +
                      hub->eng_distance * eng, inv_req);

        /*
         * Issue a dummy read to wait for the ACK register to
         * be cleared to avoid a false ACK due to the new fast
         * GRBM interface.
         */
        if ((vmhub == AMDGPU_GFXHUB_0) &&
            (adev->asic_type < CHIP_ALDEBARAN))
                RREG32_NO_KIQ(hub->vm_inv_eng0_req +
                              hub->eng_distance * eng);

        for (j = 0; j < adev->usec_timeout; j++) {
                tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
                                    hub->eng_distance * eng);
                if (tmp & (1 << vmid))

        /* TODO: Semaphore handling for GFXHUB still needs to be debugged. */
                /*
                 * Add a semaphore release after the invalidation;
                 * writing 0 releases the semaphore.
                 */
                WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                              hub->eng_distance * eng, 0);

        spin_unlock(&adev->gmc.invalidate_lock);

        if (j < adev->usec_timeout)

        DRM_ERROR("Timeout waiting for VM flush ACK!\n");

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                        uint16_t pasid, uint32_t flush_type,
                                        bool all_hub)
        uint16_t queried_pasid;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;

        if (amdgpu_in_reset(adev))

        if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
                /* Vega20+XGMI caches PTEs in TC and TLB. Add a
                 * heavy-weight TLB flush (type 2), which flushes
                 * both. Due to a race condition with concurrent
                 * memory accesses using the same TLB cache line, we
                 * still need a second TLB flush after this.
                 */
                bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
                                       adev->asic_type == CHIP_VEGA20);
                /* 2 dwords flush + 8 dwords fence */
                unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

                        ndw += kiq->pmf->invalidate_tlbs_size;

                spin_lock(&adev->gfx.kiq.ring_lock);
                /* 2 dwords flush + 8 dwords fence */
                amdgpu_ring_alloc(ring, ndw);
                        kiq->pmf->kiq_invalidate_tlbs(ring,
                                                      pasid, 2, all_hub);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                              pasid, flush_type, all_hub);
                r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
                        amdgpu_ring_undo(ring);
                        spin_unlock(&adev->gfx.kiq.ring_lock);
                        up_read(&adev->reset_sem);

                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
                        up_read(&adev->reset_sem);

                up_read(&adev->reset_sem);

        for (vmid = 1; vmid < 16; vmid++) {

                ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
                                                               &queried_pasid);
                if (ret && queried_pasid == pasid) {
                                for (i = 0; i < adev->num_vmhubs; i++)
                                        gmc_v9_0_flush_gpu_tlb(adev, vmid,
                                                               i, flush_type);
                                gmc_v9_0_flush_gpu_tlb(adev, vmid,
                                                       AMDGPU_GFXHUB_0, flush_type);

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            unsigned vmid, uint64_t pd_addr)
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
        uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;

        /*
         * The GPU may lose the gpuvm invalidate acknowledge state across
         * power-gating off cycles, so add a semaphore acquire before the
         * invalidation and a semaphore release after it to avoid entering
         * a power-gated state in between.
         */

        /* TODO: Semaphore handling for GFXHUB still needs to be debugged. */
                /* a read return value of 1 means semaphore acquired */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem +
                                          hub->eng_distance * eng, 0x1, 0x1);

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
                              (hub->ctx_addr_distance * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
                              (hub->ctx_addr_distance * vmid),
                              upper_32_bits(pd_addr));

        amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
                                            hub->eng_distance * eng,
                                            hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng,
                                            req, 1 << vmid);

        /* TODO: Semaphore handling for GFXHUB still needs to be debugged. */
                /*
                 * Add a semaphore release after the invalidation;
                 * writing 0 releases the semaphore.
                 */
                amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
                                      hub->eng_distance * eng, 0);
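
/*
 * Emit a register write that stores the VMID -> PASID mapping in the IH's
 * lookup table (IH_VMID_0_LUT for the GFX hub, IH_VMID_0_LUT_MM otherwise)
 * so interrupt entries can be attributed to the right process.
 */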
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
                                        unsigned pasid)
        struct amdgpu_device *adev = ring->adev;

        /* Do nothing because there's no LUT register for mmhub1. */
        if (ring->funcs->vmhub == AMDGPU_MMHUB_1)

        if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

        amdgpu_ring_emit_wreg(ring, reg, pasid);

/*
 * PTE format on VEGA 10:
 * 47:12 4k physical page base address
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 47:6 physical base address of PD or PTE
 */
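
/*
 * Translate the AMDGPU_VM_MTYPE_* mapping flags supplied by userspace
 * into the MTYPE bits of a VEGA10-format PTE, defaulting to MTYPE_NC.
 */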
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
        case AMDGPU_VM_MTYPE_DEFAULT:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_NC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_WC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
        case AMDGPU_VM_MTYPE_RW:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
        case AMDGPU_VM_MTYPE_CC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
        case AMDGPU_VM_MTYPE_UC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
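
/*
 * Adjust a PDE for the VM: convert the MC address to a physical address
 * where needed and, when translation is extended further, set the block
 * fragment size on PDB1 entries and the translate-further bit on PDB0
 * entries.
 */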
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                uint64_t *addr, uint64_t *flags)
        if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
                *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
        BUG_ON(*addr & 0xFFFF00000000003FULL);

        if (!adev->gmc.translate_further)

        if (level == AMDGPU_VM_PDB1) {
                /* Set the block fragment size */
                if (!(*flags & AMDGPU_PDE_PTE))
                        *flags |= AMDGPU_PDE_BFS(0x9);

        } else if (level == AMDGPU_VM_PDB0) {
                if (*flags & AMDGPU_PDE_PTE)
                        *flags &= ~AMDGPU_PDE_PTE;
                        *flags |= AMDGPU_PTE_TF;

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
                                struct amdgpu_bo_va_mapping *mapping,
                                uint64_t *flags)
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

        *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
        *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags &= ~AMDGPU_PTE_VALID;

        if ((adev->asic_type == CHIP_ARCTURUS ||
             adev->asic_type == CHIP_ALDEBARAN) &&
            !(*flags & AMDGPU_PTE_SYSTEM) &&
            mapping->bo_va->is_xgmi)
                *flags |= AMDGPU_PTE_SNOOPED;

        if (adev->asic_type == CHIP_ALDEBARAN)
                *flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
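
/*
 * Determine how much VRAM the VBIOS is using for the pre-OS console by
 * checking whether VGA is still enabled and, if not, reading back the
 * active display viewport dimensions.
 */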
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
        u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);

        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;

        switch (adev->asic_type) {
                viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
                size = (REG_GET_FIELD(viewport,
                                      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
                        REG_GET_FIELD(viewport,
                                      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
                        4);
                viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
                size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                        REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                        4);

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
        .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
        .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
        .map_mtype = gmc_v9_0_map_mtype,
        .get_vm_pde = gmc_v9_0_get_vm_pde,
        .get_vm_pte = gmc_v9_0_get_vm_pte,
        .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
        adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
        switch (adev->asic_type) {
                adev->umc.funcs = &umc_v6_0_funcs;
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.ras_funcs = &umc_v6_1_ras_funcs;

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
        switch (adev->asic_type) {
                adev->mmhub.funcs = &mmhub_v9_4_funcs;
        case CHIP_ALDEBARAN:
                adev->mmhub.funcs = &mmhub_v1_7_funcs;
                adev->mmhub.funcs = &mmhub_v1_0_funcs;

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
        switch (adev->asic_type) {
                adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
                adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
        case CHIP_ALDEBARAN:
                adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
                /* mmhub ras is not available */

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
        adev->gfxhub.funcs = &gfxhub_v1_0_funcs;

static int gmc_v9_0_early_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->asic_type == CHIP_VEGA20 ||
            adev->asic_type == CHIP_ARCTURUS)
                adev->gmc.xgmi.supported = true;

        if (adev->asic_type == CHIP_ALDEBARAN) {
                adev->gmc.xgmi.supported = true;
                adev->gmc.xgmi.connected_to_cpu =
                        adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);

        gmc_v9_0_set_gmc_funcs(adev);
        gmc_v9_0_set_irq_funcs(adev);
        gmc_v9_0_set_umc_funcs(adev);
        gmc_v9_0_set_mmhub_funcs(adev);
        gmc_v9_0_set_mmhub_ras_funcs(adev);
        gmc_v9_0_set_gfxhub_funcs(adev);

        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;

static int gmc_v9_0_late_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_gmc_allocate_vm_inv_eng(adev);

        /*
         * Workaround for a performance drop issue when the VBIOS enables
         * partial writes while disabling HBM ECC on vega10.
         */
        if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
                if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
                        if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);

        if (adev->mmhub.ras_funcs &&
            adev->mmhub.ras_funcs->reset_ras_error_count)
                adev->mmhub.ras_funcs->reset_ras_error_count(adev);

        r = amdgpu_gmc_ras_late_init(adev);

        return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
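
/*
 * Decide where VRAM, GART and the AGP aperture live in the GPU's MC
 * address space, accounting for the XGMI offset of this physical node.
 */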
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
                                       struct amdgpu_gmc *mc)
        if (!amdgpu_sriov_vf(adev))
                base = adev->mmhub.funcs->get_fb_location(adev);

        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
        if (adev->gmc.xgmi.connected_to_cpu) {
                amdgpu_gmc_sysvm_location(adev, mc);
                amdgpu_gmc_vram_location(adev, mc, base);
                amdgpu_gmc_gart_location(adev, mc);
                amdgpu_gmc_agp_location(adev, mc);

        /* base offset of vram pages */
        adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

        /* XXX: add the xgmi offset of the physical node? */
        adev->vm_manager.vram_base_offset +=
                adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
        /* size in MB on si */
        adev->gmc.mc_vram_size =
                adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

        if (!(adev->flags & AMD_IS_APU) &&
            !adev->gmc.xgmi.connected_to_cpu) {
                r = amdgpu_device_resize_fb_bar(adev);

        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
        /*
         * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
         * interface can use VRAM through this path, as it appears as system
         * reserved memory in the host address space.
         *
         * For APUs, VRAM is just the stolen system memory and can be accessed
         * directly by the CPU.
         *
         * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
         */
        /* check whether both host-gpu and gpu-gpu xgmi links exist */
        if ((adev->flags & AMD_IS_APU) ||
            (adev->gmc.xgmi.supported &&
             adev->gmc.xgmi.connected_to_cpu)) {
                adev->gmc.aper_base =
                        adev->gfxhub.funcs->get_mc_fb_offset(adev) +
                        adev->gmc.xgmi.physical_node_id *
                        adev->gmc.xgmi.node_segment_size;
                adev->gmc.aper_size = adev->gmc.real_vram_size;

        /* In case the PCI BAR is larger than the actual amount of vram */
        adev->gmc.visible_vram_size = adev->gmc.aper_size;
        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->asic_type) {
                case CHIP_VEGA10:  /* all engines support GPUVM */
                case CHIP_VEGA12:  /* all engines support GPUVM */
                case CHIP_ALDEBARAN:
                        adev->gmc.gart_size = 512ULL << 20;
                case CHIP_RAVEN:   /* DCE SG support */
                        adev->gmc.gart_size = 1024ULL << 20;
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

        adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

        gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
        if (adev->gart.bo) {
                WARN(1, "VEGA10 PCIE GART already initialized\n");

        if (adev->gmc.xgmi.connected_to_cpu) {
                adev->gmc.vmid0_page_table_depth = 1;
                adev->gmc.vmid0_page_table_block_size = 12;
                adev->gmc.vmid0_page_table_depth = 0;
                adev->gmc.vmid0_page_table_block_size = 0;

        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);

        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
                                    AMDGPU_PTE_EXECUTABLE;

        r = amdgpu_gart_table_vram_alloc(adev);

        if (adev->gmc.xgmi.connected_to_cpu) {
                r = amdgpu_gmc_pdb0_alloc(adev);

/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
        if (adev->asic_type == CHIP_RAVEN)
                adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);

static int gmc_v9_0_sw_init(void *handle)
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->gfxhub.funcs->init(adev);

        adev->mmhub.funcs->init(adev);

        spin_lock_init(&adev->gmc.invalidate_lock);

        r = amdgpu_atomfirmware_get_vram_info(adev,
                                              &vram_width, &vram_type, &vram_vendor);
        if (amdgpu_sriov_vf(adev))
                /*
                 * For Vega10 SR-IOV, vram_width can't be read from ATOM as on
                 * RAVEN, and the DF-related registers are not readable;
                 * hardcoding seems to be the only way to set the correct
                 * vram_width.
                 */
                adev->gmc.vram_width = 2048;
        else if (amdgpu_emu_mode != 1)
                adev->gmc.vram_width = vram_width;

        if (!adev->gmc.vram_width) {
                int chansize, numchan;

                /* hbm memory channel size */
                if (adev->flags & AMD_IS_APU)

                numchan = adev->df.funcs->get_hbm_channel_number(adev);
                adev->gmc.vram_width = numchan * chansize;

        adev->gmc.vram_type = vram_type;
        adev->gmc.vram_vendor = vram_vendor;
        switch (adev->asic_type) {
                adev->num_vmhubs = 2;

                if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                        /* vm_size is 128TB + 512GB for legacy 3-level page support */
                        amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
                        adev->gmc.translate_further =
                                adev->vm_manager.num_level > 1;
        case CHIP_ALDEBARAN:
                adev->num_vmhubs = 2;

                /*
                 * To fulfill 4-level page support,
                 * vm size is 256TB (48bit), maximum size of Vega10,
                 * block size 512 (9bit)
                 */
                /* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
                if (amdgpu_sriov_vf(adev))
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                adev->num_vmhubs = 3;

                /* Keep the vm size the same as on Vega20 */
                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);

        /* This interrupt is VMC page fault. */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
                              &adev->gmc.vm_fault);

        if (adev->asic_type == CHIP_ARCTURUS) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
                                      &adev->gmc.vm_fault);

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
                              &adev->gmc.vm_fault);

        if (!amdgpu_sriov_vf(adev) &&
            !adev->gmc.xgmi.connected_to_cpu) {
                /* interrupt sent to DF. */
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
                                      &adev->gmc.ecc_irq);

        /* Set the internal MC address mask
         * This is the max address of the GPU's
         * internal address space.
         */
        adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
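
        /*
         * Although the MC can address 48 bits internally, DMA to system
         * memory is limited to 44 bits here, hence the 44-bit DMA mask and
         * the matching swiotlb threshold below.
         */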
        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
                printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
        adev->need_swiotlb = drm_need_swiotlb(44);

        if (adev->gmc.xgmi.supported) {
                r = adev->gfxhub.funcs->get_xgmi_info(adev);

        r = gmc_v9_0_mc_init(adev);

        amdgpu_gmc_get_vbios_allocations(adev);

        /* Memory manager */
        r = amdgpu_bo_init(adev);

        r = gmc_v9_0_gart_init(adev);

        /*
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1..n-1
         * amdkfd will use VMIDs n..15
         *
         * The first KFD VMID is 8 for GPUs with graphics, 3 for
         * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
         * for video processing.
         */
        adev->vm_manager.first_kfd_vmid =
                (adev->asic_type == CHIP_ARCTURUS ||
                 adev->asic_type == CHIP_ALDEBARAN) ? 3 : 8;

        amdgpu_vm_manager_init(adev);

        gmc_v9_0_save_registers(adev);

static int gmc_v9_0_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_gmc_ras_fini(adev);
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_unref(&adev->gmc.pdb0_bo);
        amdgpu_bo_fini(adev);
        amdgpu_gart_fini(adev);

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
        switch (adev->asic_type) {
                if (amdgpu_sriov_vf(adev))
                soc15_program_register_sequence(adev,
                                                golden_settings_mmhub_1_0_0,
                                                ARRAY_SIZE(golden_settings_mmhub_1_0_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_athub_1_0_0,
                                                ARRAY_SIZE(golden_settings_athub_1_0_0));
                /* TODO for renoir */
                soc15_program_register_sequence(adev,
                                                golden_settings_athub_1_0_0,
                                                ARRAY_SIZE(golden_settings_athub_1_0_0));

/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
        if (adev->asic_type == CHIP_RAVEN) {
                WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
                WARN_ON(adev->gmc.sdpif_register !=
                        RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
        if (adev->gmc.xgmi.connected_to_cpu)
                amdgpu_gmc_init_pdb0(adev);

        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");

        r = amdgpu_gart_table_vram_pin(adev);

        r = adev->gfxhub.funcs->gart_enable(adev);

        r = adev->mmhub.funcs->gart_enable(adev);

        DRM_INFO("PCIE GART of %uM enabled.\n",
                 (unsigned)(adev->gmc.gart_size >> 20));
        if (adev->gmc.pdb0_bo)
                DRM_INFO("PDB0 located at 0x%016llX\n",
                         (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
        DRM_INFO("PTB located at 0x%016llX\n",
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

        adev->gart.ready = true;

static int gmc_v9_0_hw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* The sequence of these two function calls matters. */
        gmc_v9_0_init_golden_registers(adev);

        if (adev->mode_info.num_crtc) {
                /* Lockout access through VGA aperture */
                WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
                /* disable VGA render */
                WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);

        if (adev->mmhub.funcs->update_power_gating)
                adev->mmhub.funcs->update_power_gating(adev, true);

        adev->hdp.funcs->init_registers(adev);

        /* After HDP is initialized, flush HDP. */
        adev->hdp.funcs->flush_hdp(adev, NULL);

        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)

        if (!amdgpu_sriov_vf(adev)) {
                adev->gfxhub.funcs->set_fault_enable_default(adev, value);
                adev->mmhub.funcs->set_fault_enable_default(adev, value);

        for (i = 0; i < adev->num_vmhubs; ++i)
                gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

        if (adev->umc.funcs && adev->umc.funcs->init_registers)
                adev->umc.funcs->init_registers(adev);

        r = gmc_v9_0_gart_enable(adev);

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
        adev->gfxhub.funcs->gart_disable(adev);
        adev->mmhub.funcs->gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);

static int gmc_v9_0_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");

        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v9_0_gart_disable(adev);

static int gmc_v9_0_suspend(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return gmc_v9_0_hw_fini(adev);

static int gmc_v9_0_resume(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v9_0_hw_init(adev);

        amdgpu_vmid_reset_all(adev);

static bool gmc_v9_0_is_idle(void *handle)
        /* MC is always ready in GMC v9. */

static int gmc_v9_0_wait_for_idle(void *handle)
        /* There is no need to wait for MC idle in GMC v9. */

static int gmc_v9_0_soft_reset(void *handle)
        /* XXX for emulation. */

static int gmc_v9_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->mmhub.funcs->set_clockgating(adev, state);

        athub_v1_0_set_clockgating(adev, state);

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->mmhub.funcs->get_clockgating(adev, flags);

        athub_v1_0_get_clockgating(adev, flags);

static int gmc_v9_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
        .early_init = gmc_v9_0_early_init,
        .late_init = gmc_v9_0_late_init,
        .sw_init = gmc_v9_0_sw_init,
        .sw_fini = gmc_v9_0_sw_fini,
        .hw_init = gmc_v9_0_hw_init,
        .hw_fini = gmc_v9_0_hw_fini,
        .suspend = gmc_v9_0_suspend,
        .resume = gmc_v9_0_resume,
        .is_idle = gmc_v9_0_is_idle,
        .wait_for_idle = gmc_v9_0_wait_for_idle,
        .soft_reset = gmc_v9_0_soft_reset,
        .set_clockgating_state = gmc_v9_0_set_clockgating_state,
        .set_powergating_state = gmc_v9_0_set_powergating_state,
        .get_clockgating_state = gmc_v9_0_get_clockgating_state,

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .funcs = &gmc_v9_0_ip_funcs,