diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 1791d37..2015c9f 100644
@@ -37,6 +37,7 @@
 #include <linux/uaccess.h>
 #include <linux/static_call.h>
 #include <linux/amd-pstate.h>
+#include <linux/topology.h>
 
 #include <acpi/processor.h>
 #include <acpi/cppc_acpi.h>
@@ -49,6 +50,7 @@
 
 #define AMD_PSTATE_TRANSITION_LATENCY  20000
 #define AMD_PSTATE_TRANSITION_DELAY    1000
+#define AMD_PSTATE_PREFCORE_THRESHOLD  166
 
 /*
  * TODO: We need more time to fine tune processors with shared memory solution
@@ -64,6 +66,7 @@ static struct cpufreq_driver amd_pstate_driver;
 static struct cpufreq_driver amd_pstate_epp_driver;
 static int cppc_state = AMD_PSTATE_UNDEFINED;
 static bool cppc_enabled;
+static bool amd_pstate_prefcore = true;
 
 /*
  * AMD Energy Preference Performance (EPP)
@@ -297,13 +300,14 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
        if (ret)
                return ret;
 
-       /*
-        * TODO: Introduce AMD specific power feature.
-        *
-        * CPPC entry doesn't indicate the highest performance in some ASICs.
+       /* On platforms with the preferred core feature, the highest-perf
+        * register holds a core ranking (166 or 255), not a true capability,
+        * which would skew the max frequency calculation; use the fixed
+        * threshold instead. Otherwise AMD_CPPC_HIGHEST_PERF(cap1) is correct.
         */
-       highest_perf = amd_get_highest_perf();
-       if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+       if (cpudata->hw_prefcore)
+               highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
+       else
                highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
 
        WRITE_ONCE(cpudata->highest_perf, highest_perf);
@@ -311,6 +315,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
        WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
        WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
        WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+       WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
        WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
        return 0;
 }
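Why a fixed threshold works here: highest_perf feeds the boost-ratio math that derives max_freq, so letting a ranking value of up to 255 leak in would overstate the boost range by up to 255/166 ≈ 1.5x. Below is a minimal sketch of that derivation, modeled on the driver's frequency helpers; approx_max_freq_khz is an illustrative name, not a driver function, and it assumes the kernel's div_u64() and SCHED_CAPACITY_SHIFT helpers:

/*
 * Sketch: max_freq scales with highest_perf / nominal_perf.
 * Substituting AMD_PSTATE_PREFCORE_THRESHOLD keeps this ratio
 * sane on parts whose "highest perf" is really a core ranking.
 */
static u32 approx_max_freq_khz(u32 highest_perf, u32 nominal_perf,
			       u32 nominal_freq_mhz)
{
	u64 boost_ratio = div_u64((u64)highest_perf << SCHED_CAPACITY_SHIFT,
				  nominal_perf);

	/* multiply first, then drop the fixed-point shift; report kHz */
	return (u32)((nominal_freq_mhz * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000);
}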
@@ -324,8 +329,9 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
        if (ret)
                return ret;
 
-       highest_perf = amd_get_highest_perf();
-       if (highest_perf > cppc_perf.highest_perf)
+       if (cpudata->hw_prefcore)
+               highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
+       else
                highest_perf = cppc_perf.highest_perf;
 
        WRITE_ONCE(cpudata->highest_perf, highest_perf);
@@ -334,6 +340,7 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
        WRITE_ONCE(cpudata->lowest_nonlinear_perf,
                   cppc_perf.lowest_nonlinear_perf);
        WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+       WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
        WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
 
        if (cppc_state == AMD_PSTATE_ACTIVE)
@@ -477,12 +484,19 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
 
 static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
 {
-       u32 max_limit_perf, min_limit_perf;
+       u32 max_limit_perf, min_limit_perf, lowest_perf;
        struct amd_cpudata *cpudata = policy->driver_data;
 
        max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
        min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
 
+       lowest_perf = READ_ONCE(cpudata->lowest_perf);
+       if (min_limit_perf < lowest_perf)
+               min_limit_perf = lowest_perf;
+
+       if (max_limit_perf < min_limit_perf)
+               max_limit_perf = min_limit_perf;
+
        WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
        WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
        WRITE_ONCE(cpudata->max_limit_freq, policy->max);
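A worked example with illustrative numbers: take highest_perf = 166, max_freq = 4200000 kHz and lowest_perf = 20. A policy->min of 400000 kHz maps to div_u64(400000 * 166, 4200000) = 15, which the new check raises to 20; the second check then guarantees max_limit_perf never ends up below min_limit_perf, so the limits always form a valid range within hardware bounds.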
@@ -570,7 +584,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
        if (target_perf < capacity)
                des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
 
-       min_perf = READ_ONCE(cpudata->highest_perf);
+       min_perf = READ_ONCE(cpudata->lowest_perf);
        if (_min_perf < capacity)
                min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
 
@@ -706,6 +720,114 @@ static void amd_perf_ctl_reset(unsigned int cpu)
        wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
 }
 
+/*
+ * Enabling amd-pstate preferred core support can't be done directly from
+ * cpufreq callbacks due to locking, so queue the work for later.
+ */
+static void amd_pstate_sched_prefcore_workfn(struct work_struct *work)
+{
+       sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_prefcore_work, amd_pstate_sched_prefcore_workfn);
+
+/**
+ * amd_pstate_get_highest_perf - read the highest performance register value
+ * @cpu: CPU from which to get the highest performance
+ * @highest_perf: address where the value read is returned
+ *
+ * Return: 0 for success, negative error code otherwise.
+ */
+static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
+{
+       int ret;
+
+       if (boot_cpu_has(X86_FEATURE_CPPC)) {
+               u64 cap1;
+
+               ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+               if (ret)
+                       return ret;
+               WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+       } else {
+               u64 cppc_highest_perf;
+
+               ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
+               if (ret)
+                       return ret;
+               WRITE_ONCE(*highest_perf, cppc_highest_perf);
+       }
+
+       return ret;
+}
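For reference, the CAP1 accessors used above unpack four 8-bit fields from the register; the macros below reproduce the definitions in arch/x86/include/asm/msr-index.h:

/* MSR_AMD_CPPC_CAP1 field layout: */
#define AMD_CPPC_LOWEST_PERF(x)		(((x) >> 0) & 0xff)	/* bits  7:0  */
#define AMD_CPPC_LOWNONLIN_PERF(x)	(((x) >> 8) & 0xff)	/* bits 15:8  */
#define AMD_CPPC_NOMINAL_PERF(x)	(((x) >> 16) & 0xff)	/* bits 23:16 */
#define AMD_CPPC_HIGHEST_PERF(x)	(((x) >> 24) & 0xff)	/* bits 31:24 */

On preferred-core parts the top field carries the core's ranking rather than a raw capability, which is exactly why pstate_init_perf() stores it in prefcore_ranking while highest_perf gets the fixed threshold.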
+
+#define CPPC_MAX_PERF  U8_MAX
+
+static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+{
+       int ret, prio;
+       u32 highest_perf;
+
+       ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
+       if (ret)
+               return;
+
+       cpudata->hw_prefcore = true;
+       /* Check if the CPPC preferred core feature is enabled */
+       if (highest_perf < CPPC_MAX_PERF) {
+               prio = (int)highest_perf;
+       } else {
+               pr_debug("AMD CPPC preferred core is unsupported!\n");
+               cpudata->hw_prefcore = false;
+               return;
+       }
+
+       if (!amd_pstate_prefcore)
+               return;
+
+       /*
+        * The priorities can be set regardless of whether or not
+        * sched_set_itmt_support(true) has been called and it is valid to
+        * update them at any time after it has been called.
+        */
+       sched_set_itmt_core_prio(prio, cpudata->cpu);
+
+       schedule_work(&sched_prefcore_work);
+}
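The net effect of the two calls above: each CPU gets an ITMT priority equal to its CPPC ranking, and the one-shot work item turns on ITMT support globally. With hypothetical rankings of, say, 231 on the favored cores and 166 elsewhere (example values, not taken from any specific part), the scheduler steers lightly threaded work toward the 231-ranked cores once sched_set_itmt_support() has run.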
+
+static void amd_pstate_update_limits(unsigned int cpu)
+{
+       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+       struct amd_cpudata *cpudata;
+       u32 prev_high = 0, cur_high = 0;
+       int ret;
+       bool highest_perf_changed = false;
+
+       if (!policy)
+               return;
+
+       cpudata = policy->driver_data;
+
+       mutex_lock(&amd_pstate_driver_lock);
+       if (!amd_pstate_prefcore || !cpudata->hw_prefcore)
+               goto free_cpufreq_put;
+
+       ret = amd_pstate_get_highest_perf(cpu, &cur_high);
+       if (ret)
+               goto free_cpufreq_put;
+
+       prev_high = READ_ONCE(cpudata->prefcore_ranking);
+       if (prev_high != cur_high) {
+               highest_perf_changed = true;
+               WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
+
+               if (cur_high < CPPC_MAX_PERF)
+                       sched_set_itmt_core_prio((int)cur_high, cpu);
+       }
+
+free_cpufreq_put:
+       cpufreq_cpu_put(policy);
+
+       if (!highest_perf_changed)
+               cpufreq_update_policy(cpu);
+
+       mutex_unlock(&amd_pstate_driver_lock);
+}
+
 static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 {
        int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
@@ -727,6 +849,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 
        cpudata->cpu = policy->cpu;
 
+       amd_pstate_init_prefcore(cpudata);
+
        ret = amd_pstate_init_perf(cpudata);
        if (ret)
                goto free_cpudata1;
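Note the init ordering: amd_pstate_init_prefcore() runs before amd_pstate_init_perf(), so cpudata->hw_prefcore is already settled when pstate_init_perf() or cppc_init_perf() chooses between the 166 threshold and the raw highest-perf value.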
@@ -877,6 +1001,28 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
        return sysfs_emit(buf, "%u\n", perf);
 }
 
+static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
+                                               char *buf)
+{
+       u32 perf;
+       struct amd_cpudata *cpudata = policy->driver_data;
+
+       perf = READ_ONCE(cpudata->prefcore_ranking);
+
+       return sysfs_emit(buf, "%u\n", perf);
+}
+
+static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
+                                          char *buf)
+{
+       bool hw_prefcore;
+       struct amd_cpudata *cpudata = policy->driver_data;
+
+       hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
+
+       return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
+}
+
 static ssize_t show_energy_performance_available_preferences(
                                struct cpufreq_policy *policy, char *buf)
 {
@@ -1074,18 +1220,29 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
        return ret < 0 ? ret : count;
 }
 
+static ssize_t prefcore_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
+}
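Once registered, these attributes surface in the standard sysfs locations (paths assume the usual cpufreq layout):

/sys/devices/system/cpu/cpufreq/policyN/amd_pstate_prefcore_ranking
/sys/devices/system/cpu/cpufreq/policyN/amd_pstate_hw_prefcore
/sys/devices/system/cpu/amd_pstate/prefcore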
+
 cpufreq_freq_attr_ro(amd_pstate_max_freq);
 cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
 
 cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
+cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
 cpufreq_freq_attr_rw(energy_performance_preference);
 cpufreq_freq_attr_ro(energy_performance_available_preferences);
 static DEVICE_ATTR_RW(status);
+static DEVICE_ATTR_RO(prefcore);
 
 static struct freq_attr *amd_pstate_attr[] = {
        &amd_pstate_max_freq,
        &amd_pstate_lowest_nonlinear_freq,
        &amd_pstate_highest_perf,
+       &amd_pstate_prefcore_ranking,
+       &amd_pstate_hw_prefcore,
        NULL,
 };
 
@@ -1093,6 +1250,8 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
        &amd_pstate_max_freq,
        &amd_pstate_lowest_nonlinear_freq,
        &amd_pstate_highest_perf,
+       &amd_pstate_prefcore_ranking,
+       &amd_pstate_hw_prefcore,
        &energy_performance_preference,
        &energy_performance_available_preferences,
        NULL,
@@ -1100,6 +1259,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
 
 static struct attribute *pstate_global_attributes[] = {
        &dev_attr_status.attr,
+       &dev_attr_prefcore.attr,
        NULL
 };
 
@@ -1151,6 +1311,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
        cpudata->cpu = policy->cpu;
        cpudata->epp_policy = 0;
 
+       amd_pstate_init_prefcore(cpudata);
+
        ret = amd_pstate_init_perf(cpudata);
        if (ret)
                goto free_cpudata1;
@@ -1232,6 +1394,12 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
        max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
        min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
 
+       if (min_limit_perf < min_perf)
+               min_limit_perf = min_perf;
+
+       if (max_limit_perf < min_limit_perf)
+               max_limit_perf = min_limit_perf;
+
        WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
        WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
 
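This mirrors the clamping added to amd_pstate_update_min_max_limit() above: min_limit_perf is floored at the CPU's lowest-perf value and max_limit_perf can never fall below it, keeping the EPP path's limits consistent with the non-EPP path.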
@@ -1432,6 +1600,7 @@ static struct cpufreq_driver amd_pstate_driver = {
        .suspend        = amd_pstate_cpu_suspend,
        .resume         = amd_pstate_cpu_resume,
        .set_boost      = amd_pstate_set_boost,
+       .update_limits  = amd_pstate_update_limits,
        .name           = "amd-pstate",
        .attr           = amd_pstate_attr,
 };
@@ -1446,6 +1615,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
        .online         = amd_pstate_epp_cpu_online,
        .suspend        = amd_pstate_epp_suspend,
        .resume         = amd_pstate_epp_resume,
+       .update_limits  = amd_pstate_update_limits,
        .name           = "amd-pstate-epp",
        .attr           = amd_pstate_epp_attr,
 };
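Hooking ->update_limits into both driver variants matters because the cpufreq core's cpufreq_update_limits() invokes the driver callback when one is provided (falling back to cpufreq_update_policy() otherwise), so platform notifications of changed capabilities now flow into the ranking refresh implemented above.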
@@ -1567,7 +1737,17 @@ static int __init amd_pstate_param(char *str)
 
        return amd_pstate_set_driver(mode_idx);
 }
+
+static int __init amd_prefcore_param(char *str)
+{
+       if (!strcmp(str, "disable"))
+               amd_pstate_prefcore = false;
+
+       return 0;
+}
+
 early_param("amd_pstate", amd_pstate_param);
+early_param("amd_prefcore", amd_prefcore_param);
 
 MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
 MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");