ACPI: Add perf low power callback
Author:     Stephane Eranian <eranian@google.com>
AuthorDate: Tue, 22 Mar 2022 22:15:12 +0000 (15:15 -0700)
Commit:     Peter Zijlstra <peterz@infradead.org>
CommitDate: Tue, 5 Apr 2022 08:24:38 +0000 (10:24 +0200)
Add an optional callback needed by some PMU features, e.g., AMD
BRS, to give the perf_events code a chance to change its state before
a CPU goes into low power and after it comes back out.

The callback is a no-op when the PERF_NEEDS_LOPWR_CB flag is not defined.
The flag must be defined in the arch-specific perf_event.h header whenever
needed; when it is not defined, there is no impact on the ACPI code.
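
As an illustration only (not part of this patch), an architecture that
needs the callback could opt in along the following lines in its
asm/perf_event.h. The arch_pmu_lopwr_enter()/arch_pmu_lopwr_exit() hooks
are hypothetical placeholders for whatever quiesces and re-arms the PMU
feature (e.g., BRS):

    /* hypothetical sketch for an arch asm/perf_event.h, not from this patch */
    #define PERF_NEEDS_LOPWR_CB 1

    /* hypothetical arch hooks that quiesce/re-arm the PMU feature */
    extern void arch_pmu_lopwr_enter(void);
    extern void arch_pmu_lopwr_exit(void);

    static inline void perf_lopwr_cb(bool lopwr_in)
    {
            if (lopwr_in)
                    arch_pmu_lopwr_enter();  /* CPU about to enter low power */
            else
                    arch_pmu_lopwr_exit();   /* CPU came back from low power */
    }

Since include/linux/perf_event.h includes the arch header and only
provides the empty stub under #ifndef PERF_NEEDS_LOPWR_CB, defining the
macro in the arch header is enough for the ACPI call sites below to pick
up the arch implementation.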

Signed-off-by: Stephane Eranian <eranian@google.com>
[peterz: build fix]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220322221517.2510440-9-eranian@google.com
drivers/acpi/acpi_pad.c
drivers/acpi/processor_idle.c
include/linux/perf_event.h

diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index f45979a..ec0e22a 100644
@@ -17,6 +17,7 @@
 #include <linux/tick.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
+#include <linux/perf_event.h>
 #include <asm/mwait.h>
 #include <xen/xen.h>
 
@@ -164,6 +165,9 @@ static int power_saving_thread(void *data)
                                tsc_marked_unstable = 1;
                        }
                        local_irq_disable();
+
+                       perf_lopwr_cb(true);
+
                        tick_broadcast_enable();
                        tick_broadcast_enter();
                        stop_critical_timings();
@@ -172,6 +176,9 @@ static int power_saving_thread(void *data)
 
                        start_critical_timings();
                        tick_broadcast_exit();
+
+                       perf_lopwr_cb(false);
+
                        local_irq_enable();
 
                        if (time_before(expire_time, jiffies)) {
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 32b20ef..05dc0e1 100644
@@ -21,6 +21,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu.h>
 #include <linux/minmax.h>
+#include <linux/perf_event.h>
 #include <acpi/processor.h>
 
 /*
@@ -549,6 +550,8 @@ static void wait_for_freeze(void)
  */
 static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
+       perf_lopwr_cb(true);
+
        if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
@@ -559,6 +562,8 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
                inb(cx->address);
                wait_for_freeze();
        }
+
+       perf_lopwr_cb(false);
 }
 
 /**
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a411080..da75956 100644
@@ -1676,4 +1676,10 @@ typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
                                           unsigned int cnt);
 DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
 
+#ifndef PERF_NEEDS_LOPWR_CB
+static inline void perf_lopwr_cb(bool mode)
+{
+}
+#endif
+
 #endif /* _LINUX_PERF_EVENT_H */