drm/amdgpu: Rename to ras_*_enabled
author    Luben Tuikov <luben.tuikov@amd.com>
Tue, 4 May 2021 06:25:29 +0000 (02:25 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 10 May 2021 22:08:12 +0000 (18:08 -0400)
Rename
  ras_hw_supported --> ras_hw_enabled, and
  ras_features     --> ras_enabled,
to show that ras_enabled is a subset of
ras_hw_enabled, which is itself a subset
of the ASIC's RAS capability.
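
For reference, a simplified sketch of how the two fields relate after
this rename, taken from the amdgpu_ras_check_supported() hunk below
(not the full function):

  /* ras_hw_enabled: RAS blocks the hardware/VBIOS reports as usable,
   * clamped to the driver's known RAS block mask.
   */
  adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;

  /* ras_enabled: the subset of ras_hw_enabled actually enabled,
   * further filtered by the amdgpu_ras_enable and amdgpu_ras_mask
   * module parameters.
   */
  adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
          adev->ras_hw_enabled & amdgpu_ras_mask;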

Cc: Alexander Deucher <Alexander.Deucher@amd.com>
Cc: John Clements <john.clements@amd.com>
Cc: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: John Clements <John.Clements@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ba74054..cef7bbe 100644
@@ -1073,8 +1073,8 @@ struct amdgpu_device {
 
        atomic_t                        throttling_logging_enabled;
        struct ratelimit_state          throttling_logging_rs;
-       uint32_t                        ras_hw_supported;
-       uint32_t                        ras_features;
+       uint32_t                        ras_hw_enabled;
+       uint32_t                        ras_enabled;
 
        bool                            in_pci_err_recovery;
        struct pci_saved_state          *pci_state;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0ed1142..b0543f4 100644
@@ -5108,7 +5108,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
        if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
                return -ENOTSUPP;
 
-       if (ras && adev->ras_features &&
+       if (ras && adev->ras_enabled &&
            adev->nbio.funcs->enable_doorbell_interrupt)
                adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
@@ -5128,7 +5128,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
        if (ret)
                return ret;
 
-       if (ras && adev->ras_features &&
+       if (ras && adev->ras_enabled &&
            adev->nbio.funcs->enable_doorbell_interrupt)
                adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7a6a87e..8d12e47 100644
@@ -986,7 +986,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
                if (!ras)
                        return -EINVAL;
-               ras_mask = (uint64_t)adev->ras_features << 32 | ras->features;
+               ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;
 
                return copy_to_user(out, &ras_mask,
                                min_t(u64, size, sizeof(ras_mask))) ?
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4885b71..3179ca9 100644
@@ -2146,7 +2146,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
                return 0;
 
        if ((amdgpu_in_reset(adev) &&
-            ras && adev->ras_features &&
+            ras && adev->ras_enabled &&
             (adev->asic_type == CHIP_ARCTURUS ||
              adev->asic_type == CHIP_VEGA20)) ||
             (adev->in_runpm &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index e7940e8..444f532 100644
@@ -532,7 +532,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
 
-       if (!adev->ras_features || !con)
+       if (!adev->ras_enabled || !con)
                return NULL;
 
        if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
@@ -559,7 +559,7 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
        struct ras_manager *obj;
        int i;
 
-       if (!adev->ras_features || !con)
+       if (!adev->ras_enabled || !con)
                return NULL;
 
        if (head) {
@@ -613,7 +613,7 @@ static void amdgpu_ras_parse_status_code(struct amdgpu_device *adev,
 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                                         struct ras_common_if *head)
 {
-       return adev->ras_hw_supported & BIT(head->block);
+       return adev->ras_hw_enabled & BIT(head->block);
 }
 
 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
@@ -767,7 +767,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                        ret = amdgpu_ras_feature_enable(adev, head, 0);
 
                        /* clean gfx block ras features flag */
-                       if (adev->ras_features && head->block == AMDGPU_RAS_BLOCK__GFX)
+                       if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features &= ~BIT(head->block);
                }
        } else
@@ -1072,7 +1072,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
        struct ras_manager *obj;
        struct ras_err_data data = {0, 0};
 
-       if (!adev->ras_features || !con)
+       if (!adev->ras_enabled || !con)
                return 0;
 
        list_for_each_entry(obj, &con->head, node) {
@@ -1595,7 +1595,7 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
 
-       if (!adev->ras_features || !con)
+       if (!adev->ras_enabled || !con)
                return;
 
        list_for_each_entry(obj, &con->head, node) {
@@ -1645,7 +1645,7 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
 
-       if (!adev->ras_features || !con)
+       if (!adev->ras_enabled || !con)
                return;
 
        list_for_each_entry(obj, &con->head, node) {
@@ -1959,7 +1959,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
        bool exc_err_limit = false;
        int ret;
 
-       if (adev->ras_features && con)
+       if (adev->ras_enabled && con)
                data = &con->eh_data;
        else
                return 0;
@@ -2076,7 +2076,7 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
 
        if (strnstr(ctx->vbios_version, "D16406",
                    sizeof(ctx->vbios_version)))
-               adev->ras_hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX);
+               adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
 }
 
 /*
@@ -2090,7 +2090,7 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
  */
 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
 {
-       adev->ras_hw_supported = adev->ras_features = 0;
+       adev->ras_hw_enabled = adev->ras_enabled = 0;
 
        if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
            !amdgpu_ras_asic_supported(adev))
@@ -2099,7 +2099,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
        if (!adev->gmc.xgmi.connected_to_cpu) {
                if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
                        dev_info(adev->dev, "MEM ECC is active.\n");
-                       adev->ras_hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+                       adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
                                                   1 << AMDGPU_RAS_BLOCK__DF);
                } else {
                        dev_info(adev->dev, "MEM ECC is not presented.\n");
@@ -2107,7 +2107,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
 
                if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
                        dev_info(adev->dev, "SRAM ECC is active.\n");
-                       adev->ras_hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+                       adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
                                                    1 << AMDGPU_RAS_BLOCK__DF);
                } else {
                        dev_info(adev->dev, "SRAM ECC is not presented.\n");
@@ -2115,7 +2115,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
        } else {
                /* driver only manages a few IP blocks RAS feature
                 * when GPU is connected cpu through XGMI */
-               adev->ras_hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX |
+               adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
                                           1 << AMDGPU_RAS_BLOCK__SDMA |
                                           1 << AMDGPU_RAS_BLOCK__MMHUB);
        }
@@ -2123,10 +2123,10 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
        amdgpu_ras_get_quirks(adev);
 
        /* hw_supported needs to be aligned with RAS block mask. */
-       adev->ras_hw_supported &= AMDGPU_RAS_BLOCK_MASK;
+       adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
 
-       adev->ras_features = amdgpu_ras_enable == 0 ? 0 :
-               adev->ras_hw_supported & amdgpu_ras_mask;
+       adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
+               adev->ras_hw_enabled & amdgpu_ras_mask;
 }
 
 int amdgpu_ras_init(struct amdgpu_device *adev)
@@ -2149,11 +2149,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 
        amdgpu_ras_check_supported(adev);
 
-       if (!adev->ras_hw_supported || adev->asic_type == CHIP_VEGA10) {
+       if (!adev->ras_hw_enabled || adev->asic_type == CHIP_VEGA10) {
                /* set gfx block ras context feature for VEGA20 Gaming
                 * send ras disable cmd to ras ta during ras late init.
                 */
-               if (!adev->ras_features && adev->asic_type == CHIP_VEGA20) {
+               if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
                        con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
 
                        return 0;
@@ -2204,7 +2204,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 
        dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
                 "hardware ability[%x] ras_mask[%x]\n",
-                adev->ras_hw_supported, adev->ras_features);
+                adev->ras_hw_enabled, adev->ras_enabled);
 
        return 0;
 release_con:
@@ -2319,7 +2319,7 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;
 
-       if (!adev->ras_features || !con) {
+       if (!adev->ras_enabled || !con) {
                /* clean ras context for VEGA20 Gaming after send ras disable cmd */
                amdgpu_release_ras_context(adev);
 
@@ -2365,7 +2365,7 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-       if (!adev->ras_features || !con)
+       if (!adev->ras_enabled || !con)
                return;
 
        amdgpu_ras_disable_all_features(adev, 0);
@@ -2379,7 +2379,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-       if (!adev->ras_features || !con)
+       if (!adev->ras_enabled || !con)
                return 0;
 
        /* Need disable ras on all IPs here before ip [hw/sw]fini */
@@ -2392,7 +2392,7 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-       if (!adev->ras_features || !con)
+       if (!adev->ras_enabled || !con)
                return 0;
 
        amdgpu_ras_fs_fini(adev);
@@ -2412,7 +2412,7 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
 {
        amdgpu_ras_check_supported(adev);
-       if (!adev->ras_hw_supported)
+       if (!adev->ras_hw_enabled)
                return;
 
        if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
@@ -2441,7 +2441,7 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev)
        if (!con)
                return;
 
-       if (!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
+       if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
                con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
                amdgpu_ras_set_context(adev, NULL);
                kfree(con);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index f60d1cf..201fbde 100644
@@ -475,7 +475,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
 
        if (block >= AMDGPU_RAS_BLOCK_COUNT)
                return 0;
-       return ras && (adev->ras_features & (1 << block));
+       return ras && (adev->ras_enabled & (1 << block));
 }
 
 int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6028b55..093ab98 100644
@@ -1262,7 +1262,7 @@ static int gmc_v9_0_late_init(void *handle)
         * writes, while disables HBM ECC for vega10.
         */
        if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
-               if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
+               if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
                        if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
                }
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 301695f..49ece2a 100644
@@ -655,7 +655,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
        int ret = 0;
 
        /* avoid NBIF got stuck when do RAS recovery in BACO reset */
-       if (ras && adev->ras_features)
+       if (ras && adev->ras_enabled)
                adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
        ret = amdgpu_dpm_baco_reset(adev);
@@ -663,7 +663,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
                return ret;
 
        /* re-enable doorbell interrupt after BACO exit */
-       if (ras && adev->ras_features)
+       if (ras && adev->ras_enabled)
                adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
 
        return 0;
@@ -710,7 +710,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
                 * 1. PMFW version > 0x284300: all cases use baco
                 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
                 */
-               if (ras && adev->ras_features &&
+               if (ras && adev->ras_enabled &&
                    adev->pm.fw_version <= 0x283400)
                        baco_reset = false;
                break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index fb4f718..7fae6a7 100644
@@ -1430,13 +1430,13 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
        adev = (struct amdgpu_device *)(dev->gpu->kgd);
        /* kfd only concerns sram ecc on GFX and HBM ecc on UMC */
        dev->node_props.capability |=
-               ((adev->ras_features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
+               ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
                HSA_CAP_SRAM_EDCSUPPORTED : 0;
-       dev->node_props.capability |= ((adev->ras_features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+       dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
                HSA_CAP_MEM_EDCSUPPORTED : 0;
 
        if (adev->asic_type != CHIP_VEGA10)
-               dev->node_props.capability |= (adev->ras_features != 0) ?
+               dev->node_props.capability |= (adev->ras_enabled != 0) ?
                        HSA_CAP_RASEVENTNOTIFY : 0;
 
        /* SVM API and HMM page migration work together, device memory type
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
index f6b1efc..8d99c7a 100644
@@ -85,7 +85,7 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
                return 0;
 
        if (state == BACO_STATE_IN) {
-               if (!ras || !adev->ras_features) {
+               if (!ras || !adev->ras_enabled) {
                        data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
                        data |= 0x80000000;
                        WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 72581e4..a06e686 100644
@@ -1531,7 +1531,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
                                                                      NULL);
                        break;
                default:
-                       if (!ras || !adev->ras_features ||
+                       if (!ras || !adev->ras_enabled ||
                            adev->gmc.xgmi.pending_reset) {
                                if (adev->asic_type == CHIP_ARCTURUS) {
                                        data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);