From 24ee38ffe61a68fc35065fcab1908883a34c866b Mon Sep 17 00:00:00 2001
From: Kan Liang <kan.liang@linux.intel.com>
Date: Mon, 12 Apr 2021 07:30:49 -0700
Subject: [PATCH 1/1] perf/x86: Hybrid PMU support for event constraints

The supported events differ among hybrid PMUs, so each hybrid PMU
should use its own event constraints.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-10-git-send-email-kan.liang@linux.intel.com
---
 arch/x86/events/core.c       | 3 ++-
 arch/x86/events/intel/core.c | 5 +++--
 arch/x86/events/intel/ds.c   | 5 +++--
 arch/x86/events/perf_event.h | 2 ++
 4 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index e8cb892c5826..f92d2344aa05 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1518,6 +1518,7 @@ void perf_event_print_debug(void)
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	int num_counters = hybrid(cpuc->pmu, num_counters);
 	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
+	struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
 	unsigned long flags;
 	int idx;
 
@@ -1537,7 +1538,7 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: status: %016llx\n", cpu, status);
 		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
 		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
-		if (x86_pmu.pebs_constraints) {
+		if (pebs_constraints) {
 			rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
 			pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
 		}
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 4cfc382f9c64..447a80f88033 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3136,10 +3136,11 @@ struct event_constraint *
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event)
 {
+	struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
 	struct event_constraint *c;
 
-	if (x86_pmu.event_constraints) {
-		for_each_event_constraint(c, x86_pmu.event_constraints) {
+	if (event_constraints) {
+		for_each_event_constraint(c, event_constraints) {
 			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 312bf3b673a1..f1402bc1e255 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -959,13 +959,14 @@ struct event_constraint intel_spr_pebs_event_constraints[] = {
 
 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 {
+	struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
 	struct event_constraint *c;
 
 	if (!event->attr.precise_ip)
 		return NULL;
 
-	if (x86_pmu.pebs_constraints) {
-		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
+	if (pebs_constraints) {
+		for_each_event_constraint(c, pebs_constraints) {
 			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index b65cf4633f24..34b7fc92f425 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -648,6 +648,8 @@ struct x86_hybrid_pmu {
 					[PERF_COUNT_HW_CACHE_MAX]
 					[PERF_COUNT_HW_CACHE_OP_MAX]
 					[PERF_COUNT_HW_CACHE_RESULT_MAX];
+	struct event_constraint		*event_constraints;
+	struct event_constraint		*pebs_constraints;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
-- 
2.20.1
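
A note for readers following the series: each hybrid(cpuc->pmu, field) /
hybrid(event->pmu, field) access added above reads a capability field
from the per-PMU structure on hybrid parts and falls back to the global
x86_pmu copy otherwise. The standalone userspace sketch below
illustrates only that fallback pattern; struct pmu_caps, struct
hybrid_pmu, is_hybrid_system, and the direct pointer cast are simplified
stand-ins invented for the sketch, not the kernel's definitions (the
real accessor in arch/x86/events/perf_event.h resolves a struct pmu
pointer through hybrid_pmu()).

/*
 * hybrid_sketch.c - standalone illustration of the hybrid() accessor
 * pattern relied on above. Everything here is a simplified stand-in:
 * struct pmu_caps, struct hybrid_pmu, and is_hybrid_system do not
 * exist in the kernel. Build: cc -o hybrid_sketch hybrid_sketch.c
 * (uses GNU typeof and statement expressions, as the kernel does).
 */
#include <stdio.h>

struct event_constraint {
	unsigned long long code;
};

/* Stand-in for the global x86_pmu capabilities. */
struct pmu_caps {
	int num_counters;
	struct event_constraint *event_constraints;
};

static struct event_constraint core_constraints[] = {
	{ 0xc0 },	/* made-up entry */
	{ 0 },		/* terminator */
};

/* Global copy: on a hybrid part there is no single constraint table. */
static struct pmu_caps x86_pmu = {
	.num_counters = 8,
	.event_constraints = NULL,
};

/* Stand-in for struct x86_hybrid_pmu: per-PMU-type capabilities. */
struct hybrid_pmu {
	struct pmu_caps caps;
};

static int is_hybrid_system = 1;

/*
 * Pick the per-PMU field when a hybrid PMU is supplied, else fall back
 * to the global x86_pmu copy. Dereferencing the pointer produced by
 * the statement expression keeps the result usable as an lvalue.
 */
#define hybrid(_pmu, _field)						\
(*({									\
	typeof(&x86_pmu._field) __fp = &x86_pmu._field;			\
	if (is_hybrid_system && (_pmu))					\
		__fp = &((struct hybrid_pmu *)(_pmu))->caps._field;	\
	__fp;								\
}))

int main(void)
{
	struct hybrid_pmu core = {
		.caps = {
			.num_counters = 6,
			.event_constraints = core_constraints,
		},
	};
	struct event_constraint *c;

	/* No PMU given: falls back to the global copy. */
	printf("global counters: %d\n", hybrid(NULL, num_counters));

	/* Per-PMU lookup, as in x86_get_event_constraints(). */
	c = hybrid(&core, event_constraints);
	printf("core counters: %d, first constraint code: %#llx\n",
	       hybrid(&core, num_counters), c->code);
	return 0;
}

Because the statement expression evaluates to a dereferenced pointer,
the accessor can appear on either side of an assignment, which is why
a single macro serves every field the series converts.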
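
The lookup itself is unchanged by the patch; only the table it walks is
now selected per PMU. Both x86_get_event_constraints() and
intel_pebs_constraints() scan a weight-terminated table for the first
entry matching the event's config. A simplified sketch of that scan
follows; the field layout is reduced, the table entries are made-up
values, and the kernel's constraint_match() also accepts event-code
ranges, which this sketch omits.

/*
 * constraint_scan_sketch.c - simplified model of the table walk in
 * x86_get_event_constraints() and intel_pebs_constraints().
 * Build: cc -o scan constraint_scan_sketch.c
 */
#include <stdio.h>
#include <stdint.h>

struct event_constraint {
	uint64_t code;		/* event code this entry applies to */
	uint64_t cmask;		/* config bits that must match */
	uint64_t idxmsk;	/* counters the event may be scheduled on */
	int weight;		/* 0 terminates the table */
};

/* Walk entries until the zero-weight terminator, as the kernel does. */
#define for_each_event_constraint(e, c) \
	for ((e) = (c); (e)->weight; (e)++)

/* Simplified: exact match only, no event-code ranges. */
static int constraint_match(struct event_constraint *c, uint64_t ecode)
{
	return (ecode & c->cmask) == c->code;
}

/* One table per hybrid PMU type; values are illustrative only. */
static struct event_constraint core_event_constraints[] = {
	{ .code = 0x00c0, .cmask = 0xffff, .idxmsk = 0x3, .weight = 2 },
	{ .code = 0x003c, .cmask = 0xffff, .idxmsk = 0xf, .weight = 4 },
	{ .weight = 0 },	/* end marker */
};

static struct event_constraint *
get_event_constraint(struct event_constraint *table, uint64_t config)
{
	struct event_constraint *c;

	if (!table)		/* no table for this PMU: unconstrained */
		return NULL;
	for_each_event_constraint(c, table) {
		if (constraint_match(c, config))
			return c;
	}
	return NULL;
}

int main(void)
{
	struct event_constraint *c;

	c = get_event_constraint(core_event_constraints, 0x00c0);
	printf("constraint for 0x00c0: counter mask %#llx\n",
	       c ? (unsigned long long)c->idxmsk : 0ULL);
	return 0;
}

With the patch applied, the caller simply hands this loop
hybrid(pmu, event_constraints) or hybrid(pmu, pebs_constraints) instead
of the global x86_pmu table, so each hybrid PMU type is matched against
its own constraints.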