perf/x86: Hybrid PMU support for event constraints
authorKan Liang <kan.liang@linux.intel.com>
Mon, 12 Apr 2021 14:30:49 +0000 (07:30 -0700)
committerPeter Zijlstra <peterz@infradead.org>
Mon, 19 Apr 2021 18:03:25 +0000 (20:03 +0200)
The supported events differ among hybrid PMUs, so each hybrid PMU should
look up event constraints and PEBS constraints from its own tables rather
than from the global x86_pmu copies.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-10-git-send-email-kan.liang@linux.intel.com
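
Context for reviewers: the per-field lookup relies on the hybrid() accessor
introduced earlier in this series, which falls back to the global x86_pmu
copy on non-hybrid systems. A rough sketch of that accessor (as it appears
in arch/x86/events/perf_event.h; shown for reference, not part of this patch):

    #define hybrid(_pmu, _field)                                    \
    (*({                                                            \
            typeof(&x86_pmu._field) __Fp = &x86_pmu._field;         \
                                                                    \
            if (is_hybrid() && (_pmu))                              \
                    __Fp = &hybrid_pmu(_pmu)->_field;               \
                                                                    \
            __Fp;                                                   \
    }))

The macro evaluates to an lvalue, so a caller such as
hybrid(cpuc->pmu, pebs_constraints) reads the registered hybrid PMU's table
when one exists and the legacy x86_pmu.pebs_constraints otherwise.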
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/events/perf_event.h

index e8cb892..f92d234 100644 (file)
@@ -1518,6 +1518,7 @@ void perf_event_print_debug(void)
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        int num_counters = hybrid(cpuc->pmu, num_counters);
        int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
+       struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
        unsigned long flags;
        int idx;
 
@@ -1537,7 +1538,7 @@ void perf_event_print_debug(void)
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
-               if (x86_pmu.pebs_constraints) {
+               if (pebs_constraints) {
                        rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
                        pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
                }
index 4cfc382..447a80f 100644 (file)
@@ -3136,10 +3136,11 @@ struct event_constraint *
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
 {
+       struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
        struct event_constraint *c;
 
-       if (x86_pmu.event_constraints) {
-               for_each_event_constraint(c, x86_pmu.event_constraints) {
+       if (event_constraints) {
+               for_each_event_constraint(c, event_constraints) {
                        if (constraint_match(c, event->hw.config)) {
                                event->hw.flags |= c->flags;
                                return c;
index 312bf3b..f1402bc 100644 (file)
@@ -959,13 +959,14 @@ struct event_constraint intel_spr_pebs_event_constraints[] = {
 
 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 {
+       struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
        struct event_constraint *c;
 
        if (!event->attr.precise_ip)
                return NULL;
 
-       if (x86_pmu.pebs_constraints) {
-               for_each_event_constraint(c, x86_pmu.pebs_constraints) {
+       if (pebs_constraints) {
+               for_each_event_constraint(c, pebs_constraints) {
                        if (constraint_match(c, event->hw.config)) {
                                event->hw.flags |= c->flags;
                                return c;
index b65cf46..34b7fc9 100644 (file)
@@ -648,6 +648,8 @@ struct x86_hybrid_pmu {
                                        [PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX];
+       struct event_constraint         *event_constraints;
+       struct event_constraint         *pebs_constraints;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
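
For illustration only: a follow-up patch in this series (Alder Lake hybrid
support) is what actually populates the two new fields per hybrid PMU. The
sketch below assumes the constraint tables chosen by that later patch and is
not part of this change:

    struct x86_hybrid_pmu *pmu;

    /* big-core PMU: point at the Sapphire Rapids style constraint tables */
    pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
    pmu->event_constraints = intel_spr_event_constraints;
    pmu->pebs_constraints  = intel_spr_pebs_event_constraints;

    /* small-core (Atom) PMU: point at the Atom-specific tables */
    pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
    pmu->event_constraints = intel_slm_event_constraints;
    pmu->pebs_constraints  = intel_grt_pebs_event_constraints;

Once the fields are set, x86_get_event_constraints() and
intel_pebs_constraints() pick the right table through hybrid(cpuc->pmu, ...)
without any per-call-site branching on the CPU type.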