Under Vega10 virtualization, the SMU IP block will not be added.
Therefore, we need to add pp clk query and force dpm level functions
to amdgpu_virt_ops to support the feature.
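
Both operations reduce to a mailbox round trip to the GIM host,
serialized by the new dpm_mutex. Roughly, on the guest side (see the
mxgpu_ai hunks below):

	mutex_lock(&adev->virt.dpm_mutex);
	xgpu_ai_mailbox_trans_msg(adev, IDH_IRQ_GET_PP_SCLK, 0, 0, 0);
	r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); /* host acks, or replies IDH_FAIL */
	mutex_unlock(&adev->virt.dpm_mutex);
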
v2: add get_pp_clk existence check and use kzalloc to allocate buf
v3: return -ENOMEM for allocation failure and correct the coding style
Signed-off-by: Yintian Tao <yttao@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
+ mutex_init(&adev->virt.dpm_mutex);
amdgpu_device_check_arguments(adev);
if (adev->pm.dpm_enabled) {
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk) {
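+ /* no SMU IP block on a hwperf-enabled VF: query the peak clocks from the host */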
+ dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
}
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgim_is_hwperf(adev) &&
+ adev->virt.ops->force_dpm_level) {
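+ /* forward the requested level to the GIM host through the virt mailbox op */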
+ mutex_lock(&adev->pm.mutex);
+ adev->virt.ops->force_dpm_level(adev, level);
+ mutex_unlock(&adev->pm.mutex);
+ return count;
+ } else {
+ return -EINVAL;
+ }
+ }
+
if (current_level == level)
return count;
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
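+ /* on a hwperf-enabled VF the clock level table comes from the host, not powerplay/SMU */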
+ if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk)
+ return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
if (is_support_sw_smu(adev))
return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
}
}
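+/*
+ * Parse one clock value out of the "N: <clock>Mhz ..." level list returned by
+ * get_pp_clk: the first (lowest) level when @min is true, otherwise the last
+ * (highest) one. The MHz value is scaled by 100 so the result is in the same
+ * 10 kHz units as amdgpu_dpm_get_sclk().
+ */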
+static uint32_t parse_clk(char *buf, bool min)
+{
+ char *ptr = buf;
+ uint32_t clk = 0;
+
+ do {
+ ptr = strchr(ptr, ':');
+ if (!ptr)
+ break;
+ ptr += 2;
+ clk = simple_strtoul(ptr, NULL, 10);
+ } while (!min);
+
+ return clk * 100;
+}
+
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
+
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+ int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
+ int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
/*
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+ /* HW PERF SIM in GIM */
+ AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};
struct amd_sriov_msg_pf2vf_info_header {
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
+ /* serialize DPM requests sent to the GIM host */
+ struct mutex dpm_mutex;
};
#define amdgpu_sriov_enabled(adev) \
#endif
}
+#define amdgim_is_hwperf(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
#endif
xgpu_ai_mailbox_set_valid(adev, false);
}
+static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
+{
+ int r = 0;
+ u32 req, val, size;
+
+ if (!amdgim_is_hwperf(adev) || buf == NULL)
+ return -EBADRQC;
+
+ switch (type) {
+ case PP_SCLK:
+ req = IDH_IRQ_GET_PP_SCLK;
+ break;
+ case PP_MCLK:
+ req = IDH_IRQ_GET_PP_MCLK;
+ break;
+ default:
+ return -EBADRQC;
+ }
+
+ mutex_lock(&adev->virt.dpm_mutex);
+
+ xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r && adev->fw_vram_usage.va != NULL) {
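+ /* DW1 holds the offset of the reply string inside the pf2vf exchange area */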
+ val = RREG32_NO_KIQ(
+ SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
+ size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
+ val), PAGE_SIZE);
+
+ if (size < PAGE_SIZE)
+ strcpy(buf, ((char *)adev->virt.fw_reserve.p_pf2vf + val));
+ else
+ size = 0;
+
+ r = size;
+ goto out;
+ }
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if (!r)
+ pr_info("%s DPM request failed",
+ (type == PP_SCLK) ? "SCLK" : "MCLK");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
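+/* ask the host to force the given dpm level; it answers with IDH_SUCCESS or IDH_FAIL */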
+static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
+{
+ int r = 0;
+ u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
+
+ if (!amdgim_is_hwperf(adev))
+ return -EBADRQC;
+
+ mutex_lock(&adev->virt.dpm_mutex);
+ xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r)
+ goto out;
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if (!r)
+ pr_info("DPM request failed");
+ else
+ pr_info("Mailbox is broken");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
+ .get_pp_clk = xgpu_ai_get_pp_clk,
+ .force_dpm_level = xgpu_ai_force_dpm_level,
};
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
+ IDH_IRQ_FORCE_DPM_LEVEL = 10,
+ IDH_IRQ_GET_PP_SCLK,
+ IDH_IRQ_GET_PP_MCLK,
+
IDH_LOG_VF_ERROR = 200,
};
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
+ IDH_SUCCESS,
+ IDH_FAIL,
IDH_EVENT_MAX
};