/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
29 int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
33 unsigned int table_entries;
34 struct pp_power_state *state;
37 if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
40 if (hwmgr->hwmgr_func->get_power_state_size == NULL)
43 hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
45 hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
46 sizeof(struct pp_power_state);
48 if (table_entries == 0 || size == 0) {
49 pr_warn("Please check whether power state management is supported on this asic\n");
53 hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
54 if (hwmgr->ps == NULL)
57 hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
58 if (hwmgr->request_ps == NULL) {
64 hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
65 if (hwmgr->current_ps == NULL) {
66 kfree(hwmgr->request_ps);
68 hwmgr->request_ps = NULL;
75 for (i = 0; i < table_entries; i++) {
76 result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
78 kfree(hwmgr->request_ps);
80 hwmgr->request_ps = NULL;
85 if (state->classification.flags & PP_StateClassificationFlag_Boot) {
86 hwmgr->boot_ps = state;
87 memcpy(hwmgr->current_ps, state, size);
88 memcpy(hwmgr->request_ps, state, size);
91 state->id = i + 1; /* assigned unique num for every power state id */
93 if (state->classification.flags & PP_StateClassificationFlag_Uvd)
94 hwmgr->uvd_ps = state;
95 state = (struct pp_power_state *)((unsigned long)state + size);
101 int psm_fini_power_state_table(struct pp_hwmgr *hwmgr)
109 kfree(hwmgr->current_ps);
110 kfree(hwmgr->request_ps);
112 hwmgr->request_ps = NULL;
114 hwmgr->current_ps = NULL;
118 static int psm_get_ui_state(struct pp_hwmgr *hwmgr,
119 enum PP_StateUILabel ui_label,
120 unsigned long *state_id)
122 struct pp_power_state *state;
126 table_entries = hwmgr->num_ps;
129 for (i = 0; i < table_entries; i++) {
130 if (state->classification.ui_label & ui_label) {
131 *state_id = state->id;
134 state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
139 static int psm_get_state_by_classification(struct pp_hwmgr *hwmgr,
140 enum PP_StateClassificationFlag flag,
141 unsigned long *state_id)
143 struct pp_power_state *state;
147 table_entries = hwmgr->num_ps;
150 for (i = 0; i < table_entries; i++) {
151 if (state->classification.flags & flag) {
152 *state_id = state->id;
155 state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
160 static int psm_set_states(struct pp_hwmgr *hwmgr, unsigned long state_id)
162 struct pp_power_state *state;
166 table_entries = hwmgr->num_ps;
170 for (i = 0; i < table_entries; i++) {
171 if (state->id == state_id) {
172 memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
175 state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
180 int psm_set_boot_states(struct pp_hwmgr *hwmgr)
182 unsigned long state_id;
188 if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot,
190 ret = psm_set_states(hwmgr, state_id);
195 int psm_set_performance_states(struct pp_hwmgr *hwmgr)
197 unsigned long state_id;
203 if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance,
205 ret = psm_set_states(hwmgr, state_id);
210 int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
211 enum PP_StateUILabel label_id,
212 struct pp_power_state **state)
220 table_entries = hwmgr->num_ps;
224 for (i = 0; i < table_entries; i++) {
225 if ((*state)->classification.ui_label & label_id)
227 *state = (struct pp_power_state *)((uintptr_t)*state + hwmgr->ps_size);
231 case PP_StateUILabel_Battery:
232 case PP_StateUILabel_Balanced:
233 label_id = PP_StateUILabel_Performance;
241 static void power_state_management(struct pp_hwmgr *hwmgr,
242 struct pp_power_state *new_ps)
244 struct pp_power_state *pcurrent;
245 struct pp_power_state *requested;
251 requested = hwmgr->request_ps;
253 pcurrent = hwmgr->current_ps;
255 phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
256 if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr,
257 &pcurrent->hardware, &requested->hardware, &equal)))
260 if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
261 phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
262 memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
266 int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_settings,
267 struct pp_power_state *new_ps)
273 if (!skip_display_settings)
274 phm_display_configuration_changed(hwmgr);
277 power_state_management(hwmgr, new_ps);
280 * for vega12/vega20 which does not support power state manager
281 * DAL clock limits should also be honoured
283 phm_apply_clock_adjust_rules(hwmgr);
285 if (!skip_display_settings)
286 phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
289 if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
290 hwmgr->dpm_level = hwmgr->request_dpm_level;
292 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
293 index = fls(hwmgr->workload_mask);
294 index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
295 workload = hwmgr->workload_setting[index];
297 if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
298 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);