 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
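
/*
 * Illustrative sketch only (these helpers are hypothetical and not used
 * elsewhere in this file): decoding the primary viewport dimensions from a
 * raw HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION value with the mask/shift pairs
 * defined above. The driver's REG_GET_FIELD() macro expands to the same
 * mask-and-shift operation.
 */
static inline u32 hubp0_pri_viewport_width(u32 dim)
{
	return (dim & HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK) >>
	       HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT;
}

static inline u32 hubp0_pri_viewport_height(u32 dim)
{
	return (dim & HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK) >>
	       HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT;
}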
static const char *gfxhub_client_ids[] = {

static const char *mmhub_client_ids_raven[][2] = {

static const char *mmhub_client_ids_renoir[][2] = {

static const char *mmhub_client_ids_vega10[][2] = {
	[32+14][0] = "SDMA0",
	[32+4][1] = "DCEDWB",
	[32+14][1] = "SDMA1",

static const char *mmhub_client_ids_vega12[][2] = {
	[32+15][0] = "SDMA0",
	[32+1][1] = "DCEDWB",
	[32+15][1] = "SDMA1",

static const char *mmhub_client_ids_vega20[][2] = {
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[32+14][1] = "SDMA1",

static const char *mmhub_client_ids_arcturus[][2] = {
	[32+15][0] = "SDMA1",
	[64+15][0] = "SDMA2",
	[96+15][0] = "SDMA3",
	[128+15][0] = "SDMA4",
	[160+11][0] = "JPEG",
	[160+13][0] = "VCNU",
	[160+15][0] = "SDMA5",
	[192+10][0] = "UTCL2",
	[192+11][0] = "JPEG1",
	[192+12][0] = "VCN1",
	[192+13][0] = "VCN1U",
	[192+15][0] = "SDMA6",
	[224+15][0] = "SDMA7",
	[32+15][1] = "SDMA1",
	[64+15][1] = "SDMA2",
	[96+15][1] = "SDMA3",
	[128+15][1] = "SDMA4",
	[160+11][1] = "JPEG",
	[160+13][1] = "VCNU",
	[160+15][1] = "SDMA5",
	[192+11][1] = "JPEG1",
	[192+12][1] = "VCN1",
	[192+13][1] = "VCN1U",
	[192+15][1] = "SDMA6",
	[224+15][1] = "SDMA7",
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
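
/*
 * Per-channel UMC error-control registers: the 32 entries below cover 8 UMC
 * instances at a 0x40000 stride, each with 4 channel instances at a 0x800
 * stride. The mask table that follows mirrors the same layout at a +0x20
 * register offset.
 */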
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
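
/*
 * A minimal sketch of the same layout in loop form (illustrative only; the
 * driver keeps the flat tables so that ARRAY_SIZE() can be applied to them):
 *
 *	for (inst = 0; inst < 8; inst++)
 *		for (ch = 0; ch < 4; ch++) {
 *			ctrl = 0x000143c0 + inst * 0x00040000 + ch * 0x00000800;
 *			mask = ctrl + 0x20;
 *		}
 */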
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					enum amdgpu_interrupt_state state)
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 have these programming
	 * sequences performed by the PSP bootloader (BL).
	 */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     enum amdgpu_interrupt_state state)
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0, cid = 0, rw = 0;
	struct amdgpu_task_info task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	u64 addr;
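
	/* The IV payload carries the faulting page number: src_data[0] holds
	 * bits 43:12 of the virtual address and the low nibble of src_data[1]
	 * supplies bits 47:44, reassembling the full 48-bit fault address
	 * below.
	 */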
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	/* Returning 1 here also prevents sending the IV to the KFD */

	/* Process it only if it's the first fault for this address */
	if (entry->ih != &adev->irq.ih_soft &&
	    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
				     entry->timestamp))
		return 1;

	/* Delegate it to a different ring if the hardware hasn't
	 * already done it.
	 */
	if (in_interrupt()) {
		amdgpu_irq_delegate(adev, entry, 8);
		return 1;
	}
	/* Try to handle the recoverable page faults by filling page
	 * tables
	 */
	if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1;

	if (!printk_ratelimit())
		return 0;

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		hub_name = "gfxhub0";
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
		"pasid:%u, for process %s pid %d thread %s pid %d)\n",
		hub_name, retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, " in page starting at address 0x%012llx from client %d\n",
		addr, entry->client_id);
	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if (entry->vmid_src == AMDGPU_GFXHUB_0)
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
			break;
		case CHIP_VEGA12:
			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
			break;
		case CHIP_VEGA20:
			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
			break;
		case CHIP_ARCTURUS:
			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
			break;
		case CHIP_RAVEN:
			mmhub_cid = mmhub_client_ids_raven[cid][rw];
			break;
		case CHIP_RENOIR:
			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
			break;
		default:
			mmhub_cid = NULL;
			break;
		}
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
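
	/* The assembled request word is what gets written to the per-engine
	 * VM_INVALIDATE_ENG*_REQ register; see gmc_v9_0_flush_gpu_tlb() and
	 * gmc_v9_0_emit_flush_gpu_tlb() below.
	 */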
/**
 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: the hub being invalidated
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					      uint32_t vmhub)
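	/* The semaphore is only taken for the MMHUBs on bare metal; it is
	 * skipped under SR-IOV and on Picasso (an APU with the PICASSO flag
	 * but not RAVEN2), where the power-gating workaround does not apply.
	 */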
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
						     uint8_t vmid, uint16_t *p_pasid)
	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
/*
 * VMID 0 maps physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with a certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using a certain flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	const unsigned eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->asic_type == CHIP_VEGA20) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	}
	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_sem)) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		up_read(&adev->reset_sem);
		return;
	}
	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across
	 * power-gating off cycles, so acquire a semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state in between.
	 */

	/* TODO: the semaphore path still needs debugging for GFXHUB as well */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}
		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}
	WREG32_NO_KIQ(hub->vm_inv_eng0_req +
		      hub->eng_distance * eng, inv_req);

	/*
	 * Issue a dummy read to wait for the ACK register to
	 * be cleared to avoid a false ACK due to the new fast
	 * GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req +
			      hub->eng_distance * eng);
	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
				    hub->eng_distance * eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	/* TODO: the semaphore path still needs debugging for GFXHUB as well */
	if (use_semaphore)
		/*
		 * add a semaphore release after invalidation;
		 * a write of 0 releases the semaphore
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
	uint16_t queried_pasid;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_in_reset(adev))
		return -EIO;

	if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->asic_type == CHIP_VEGA20);
		/* 2 dwords flush + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, ndw);
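		/* Under the XGMI workaround, a heavy-weight (type 2)
		 * invalidation is emitted first, followed by one with the
		 * caller's flush_type (see the comment above).
		 */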
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			up_read(&adev->reset_sem);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			up_read(&adev->reset_sem);
			return -ETIME;
		}
		up_read(&adev->reset_sem);
		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {
		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
							       &queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub)
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							       i, flush_type);
			else
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						       AMDGPU_GFXHUB_0, flush_type);
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;
	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across
	 * power-gating off cycles, so acquire a semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state in between.
	 */

	/* TODO: the semaphore path still needs debugging for GFXHUB as well */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);
	/* TODO: the semaphore path still needs debugging for GFXHUB as well */
	if (use_semaphore)
		/*
		 * add a semaphore release after invalidation;
		 * a write of 0 releases the semaphore
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
/*
 * PTE format on VEGA 10:
 * 47:12 4k physical page base address
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 47:6 physical base address of PD or PTE
 */
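
/*
 * A minimal illustrative sketch (not driver code): assembling a valid,
 * executable 4K PTE for a page at physical address paddr from the fields
 * above. The flag names are the generic AMDGPU_PTE_* bits used throughout
 * this file.
 *
 *	u64 pte = (paddr & 0x0000FFFFFFFFF000ULL) |
 *		  AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
 *		  AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_EXECUTABLE |
 *		  AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
 */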
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
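		/* (BFS 9 = 2^9 contiguous 4K pages, i.e. 2 MB fragments) */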
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
	adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
static int gmc_v9_0_early_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);
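
	/* 4 GB shared and 4 GB private apertures high in the 64-bit GPUVM
	 * address space; these SOC15 defaults are consumed by the
	 * compute/KFD side (e.g. when programming SH_MEM_BASES).
	 */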
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
static int gmc_v9_0_late_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Workaround for a performance drop when the VBIOS enables partial
	 * writes while HBM ECC is disabled on Vega10.
	 */
	if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
		if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
		adev->mmhub.funcs->reset_ras_error_count(adev);

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
	if (!amdgpu_sriov_vf(adev))
		base = adev->mmhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
	/* VRAM size reported by the memory controller, in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10: /* all engines support GPUVM */
		case CHIP_VEGA12: /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN: /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);

/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume.
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
	if (adev->asic_type == CHIP_RAVEN)
		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
static int gmc_v9_0_sw_init(void *handle)
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * RAVEN, and the DF-related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df.funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48 bit), the maximum size on Vega10,
		 * with a block size of 512 (9 bit).
		 */
		/* SR-IOV restricts max_pfn to below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size the same as Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	}
	/* This interrupt is the VMC page fault interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
				      &adev->gmc.vm_fault);
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
	}
	/* Set the internal MC address mask.
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);

	r = gmc_v9_0_gart_init(adev);

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		adev->asic_type == CHIP_ARCTURUS ? 3 : 8;

	amdgpu_vm_manager_init(adev);

	gmc_v9_0_save_registers(adev);
static int gmc_v9_0_sw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		fallthrough;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
	if (adev->asic_type == CHIP_RAVEN) {
		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
		WARN_ON(adev->gmc.sdpif_register !=
			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
	}
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);

	r = adev->gfxhub.funcs->gart_enable(adev);

	r = adev->mmhub.funcs->gart_enable(adev);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
static int gmc_v9_0_hw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lock out access through the VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	adev->hdp.funcs->init_registers(adev);

	/* After HDP is initialized, flush HDP. */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);

static int gmc_v9_0_hw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
static int gmc_v9_0_suspend(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);

static int gmc_v9_0_resume(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

static bool gmc_v9_0_is_idle(void *handle)
	/* MC is always ready in GMC v9. */
	return true;

static int gmc_v9_0_wait_for_idle(void *handle)
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;

static int gmc_v9_0_soft_reset(void *handle)
	/* XXX for emulation. */
	return 0;

static int gmc_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);

static int gmc_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
	return 0;
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};