drm/amdgpu: further move TLB hw workarounds a layer up
Author:     Christian König <christian.koenig@amd.com>
AuthorDate: Mon, 4 Sep 2023 15:41:11 +0000 (17:41 +0200)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Tue, 26 Sep 2023 20:55:09 +0000 (16:55 -0400)
For PASID flushing we already handle this at a higher layer; apply
those workarounds to the standard flush as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
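The two flags consumed in amdgpu_gmc.c are set per ASIC by the gmc_v9_0 code;
the conditions correspond to the checks deleted from gmc_v9_0_flush_gpu_tlb()
below. A minimal sketch of that initialization follows, assuming it lives in a
gmc_v9_0 init hook (the function name and placement are illustrative; only the
flag names, the IP-version accessor and the conditions are taken from this
patch):

    /* Sketch only: the init hook is an assumption; the conditions mirror
     * the checks removed from gmc_v9_0_flush_gpu_tlb() in this patch.
     */
    static int gmc_v9_0_sw_init(void *handle)
    {
            struct amdgpu_device *adev = (struct amdgpu_device *)handle;

            /* Vega20 + XGMI caches PTEs in TC and TLB, so every flush needs
             * an additional heavy-weight (type 2) flush first.
             */
            adev->gmc.flush_tlb_needs_extra_type_2 =
                    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
                    adev->gmc.xgmi.num_physical_nodes;

            /* GC 9.4.3 rev 0 needs an extra type 0 flush issued before a
             * heavy-weight (type 2) flush.
             */
            adev->gmc.flush_tlb_needs_extra_type_0 =
                    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
                    adev->rev_id == 0;

            /* ... rest of the existing init code ... */
            return 0;
    }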

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 230f06a..f74a51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -604,6 +604,14 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                if (!down_read_trylock(&adev->reset_domain->sem))
                        return;
 
+               if (adev->gmc.flush_tlb_needs_extra_type_2)
+                       adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid,
+                                                          vmhub, 2);
+
+               if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2)
+                       adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid,
+                                                          vmhub, 0);
+
                adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub,
                                                   flush_type);
                up_read(&adev->reset_domain->sem);
@@ -654,6 +662,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
 
        if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready ||
            !down_read_trylock(&adev->reset_domain->sem)) {
+
+               if (adev->gmc.flush_tlb_needs_extra_type_2)
+                       adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
+                                                                2, all_hub,
+                                                                inst);
+
+               if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2)
+                       adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
+                                                                0, all_hub,
+                                                                inst);
+
                adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
                                                         flush_type, all_hub,
                                                         inst);
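
With the checks in the generic helper, every caller of amdgpu_gmc_flush_gpu_tlb()
now picks up the extra flushes automatically. A hypothetical call site only has
to request the flush type it logically needs, for example:

    /* Hypothetical caller: the helper issues any extra type 2/type 0
     * flushes required by the hw workarounds before this request.
     */
    amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0);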
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 0f55886..3630b53 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -814,37 +814,18 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                        uint32_t vmhub, uint32_t flush_type)
 {
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
-       u32 j, inv_req, inv_req2, tmp, sem, req, ack;
+       u32 j, inv_req, tmp, sem, req, ack;
        const unsigned int eng = 17;
        struct amdgpu_vmhub *hub;
 
        BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);
 
        hub = &adev->vmhub[vmhub];
+       inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
        sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
        req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
        ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
-       if (adev->gmc.xgmi.num_physical_nodes &&
-           amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0)) {
-               /* Vega20+XGMI caches PTEs in TC and TLB. Add a
-                * heavy-weight TLB flush (type 2), which flushes
-                * both. Due to a race condition with concurrent
-                * memory accesses using the same TLB cache line, we
-                * still need a second TLB flush after this.
-                */
-               inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
-               inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
-       } else if (flush_type == 2 &&
-                  amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
-                  adev->rev_id == 0) {
-               inv_req = gmc_v9_0_get_invalidate_req(vmid, 0);
-               inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
-       } else {
-               inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
-               inv_req2 = 0;
-       }
-
        /* This is necessary for a HW workaround under SRIOV as well
         * as GFXOFF under bare metal
         */
@@ -855,10 +836,6 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
                                                   1 << vmid);
-               if (inv_req2)
-                       amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack,
-                                                          inv_req2, 1 << vmid);
-
                return;
        }
 
@@ -888,34 +865,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }
 
-       do {
-               if (vmhub >= AMDGPU_MMHUB0(0))
-                       WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req);
-               else
-                       WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req);
-
-               /*
-                * Issue a dummy read to wait for the ACK register to
-                * be cleared to avoid a false ACK due to the new fast
-                * GRBM interface.
-                */
-               if ((vmhub == AMDGPU_GFXHUB(0)) &&
-                   (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
-                       RREG32_SOC15_IP_NO_KIQ(GC, req);
+       if (vmhub >= AMDGPU_MMHUB0(0))
+               WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req);
+       else
+               WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req);
 
-               for (j = 0; j < adev->usec_timeout; j++) {
-                       if (vmhub >= AMDGPU_MMHUB0(0))
-                               tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack);
-                       else
-                               tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack);
-                       if (tmp & (1 << vmid))
-                               break;
-                       udelay(1);
-               }
+       /*
+        * Issue a dummy read to wait for the ACK register to
+        * be cleared to avoid a false ACK due to the new fast
+        * GRBM interface.
+        */
+       if ((vmhub == AMDGPU_GFXHUB(0)) &&
+           (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
+               RREG32_SOC15_IP_NO_KIQ(GC, req);
 
-               inv_req = inv_req2;
-               inv_req2 = 0;
-       } while (inv_req);
+       for (j = 0; j < adev->usec_timeout; j++) {
+               if (vmhub >= AMDGPU_MMHUB0(0))
+                       tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack);
+               else
+                       tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack);
+               if (tmp & (1 << vmid))
+                       break;
+               udelay(1);
+       }
 
        /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
        if (use_semaphore) {