Merge tag 'sched-core-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-microblaze.git] / arch / x86 / kernel / smpboot.c
index 99bdceb..9278ed7 100644 (file)
 #include <asm/hw_irq.h>
 #include <asm/stackprotector.h>
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+#endif
+
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@ -148,7 +152,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
        *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 }
 
-static void init_freq_invariance(bool secondary);
+static void init_freq_invariance(bool secondary, bool cppc_ready);
 
 /*
  * Report back to the Boot Processor during boot time or to the caller processor
@@ -186,7 +190,7 @@ static void smp_callin(void)
         */
        set_cpu_sibling_map(raw_smp_processor_id());
 
-       init_freq_invariance(true);
+       init_freq_invariance(true, false);
 
        /*
         * Get our bogomips.
@@ -1341,7 +1345,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        set_sched_topology(x86_topology);
 
        set_cpu_sibling_map(0);
-       init_freq_invariance(false);
+       init_freq_invariance(false, false);
        smp_sanity_check();
 
        switch (apic_intr_mode) {
@@ -2028,6 +2032,48 @@ out:
        return true;
 }
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+/*
+ * Compute the boost/base performance ratio for AMD CPUs from ACPI CPPC
+ * capability data and publish it in arch_turbo_freq_ratio (scaled by
+ * SCHED_CAPACITY_SCALE).  Returns true on success, false if the CPPC
+ * data is unavailable or implausible.
+ */
+static bool amd_set_max_freq_ratio(void)
+{
+       struct cppc_perf_caps perf_caps;
+       u64 highest_perf, nominal_perf;
+       u64 perf_ratio;
+       int rc;
+
+       /*
+        * Caps are read from CPU 0 only — presumably uniform across the
+        * system; confirm against CPPC table contents if in doubt.
+        */
+       rc = cppc_get_perf_caps(0, &perf_caps);
+       if (rc) {
+               pr_debug("Could not retrieve perf counters (%d)\n", rc);
+               return false;
+       }
+
+       highest_perf = perf_caps.highest_perf;
+       nominal_perf = perf_caps.nominal_perf;
+
+       /* Either value being zero would make the ratio meaningless. */
+       if (!highest_perf || !nominal_perf) {
+               pr_debug("Could not retrieve highest or nominal performance\n");
+               return false;
+       }
+
+       perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
+       /* midpoint between max_boost and max_P */
+       perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
+       if (!perf_ratio) {
+               pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
+               return false;
+       }
+
+       arch_turbo_freq_ratio = perf_ratio;
+       /* false: ratio was derived from real data, not a fallback guess. */
+       arch_set_max_freq_ratio(false);
+
+       return true;
+}
+#else
+/* Stub when the ACPI CPPC library is not built: no ratio can be derived. */
+static bool amd_set_max_freq_ratio(void)
+{
+       return false;
+}
+#endif
+
 static void init_counter_refs(void)
 {
        u64 aperf, mperf;
@@ -2039,7 +2085,7 @@ static void init_counter_refs(void)
        this_cpu_write(arch_prev_mperf, mperf);
 }
 
-static void init_freq_invariance(bool secondary)
+static void init_freq_invariance(bool secondary, bool cppc_ready)
 {
        bool ret = false;
 
@@ -2055,15 +2101,38 @@ static void init_freq_invariance(bool secondary)
 
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                ret = intel_set_max_freq_ratio();
+       else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+               if (!cppc_ready) {
+                       return;
+               }
+               ret = amd_set_max_freq_ratio();
+       }
 
        if (ret) {
                init_counter_refs();
                static_branch_enable(&arch_scale_freq_key);
+               pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
        } else {
                pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
        }
 }
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+/* Serializes concurrent callers of init_freq_invariance_cppc(). */
+static DEFINE_MUTEX(freq_invariance_lock);
+
+/*
+ * Re-run frequency-invariance setup once CPPC data is usable
+ * (cppc_ready = true).  Caller is outside this file — presumably the
+ * ACPI CPPC code; verify at the call site.  The static flag makes the
+ * first invocation act as the boot CPU (secondary = false) and every
+ * later one as a secondary, mirroring init_freq_invariance()'s contract.
+ */
+void init_freq_invariance_cppc(void)
+{
+       static bool secondary;
+
+       mutex_lock(&freq_invariance_lock);
+
+       init_freq_invariance(secondary, true);
+       secondary = true;
+
+       mutex_unlock(&freq_invariance_lock);
+}
+#endif
+
 static void disable_freq_invariance_workfn(struct work_struct *work)
 {
        static_branch_disable(&arch_scale_freq_key);
@@ -2113,7 +2182,7 @@ error:
        schedule_work(&disable_freq_invariance_work);
 }
 #else
-static void init_freq_invariance(bool secondary);
+/* !CONFIG_X86_64 stub: frequency invariance is unsupported, do nothing. */
+static void init_freq_invariance(bool secondary, bool cppc_ready);
 #endif /* CONFIG_X86_64 */