perf/amd/uncore: Add support for Family 19h L3 PMU
author     Kim Phillips <kim.phillips@amd.com>
           Fri, 13 Mar 2020 23:10:24 +0000 (18:10 -0500)
committer  Borislav Petkov <bp@suse.de>
           Tue, 17 Mar 2020 12:01:03 +0000 (13:01 +0100)

Family 19h introduces a change to the slice, core and thread specification
in its L3 Performance Event Select (ChL3PmcCfg) h/w register. The change is
incompatible with Family 17h's version of the register.

Introduce a new path in l3_thread_slice_mask() to program Family 19h
hardware differently from Family 17h; without it, the new hardware doesn't
get programmed correctly.

Instead of a linear core--thread bitmask, Family 19h takes an encoded core
number and a separate thread mask. There are also new enable bits for
counting across all cores and across all slices; only the latter is used,
since the driver counts events for all slices on behalf of the specified
CPU.
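
For illustration only (not part of the patch), a standalone user-space
sketch of both encodings, using the mask definitions added below; the
helper names and the core=5/thread=1 values are made up for the example:

  /* Sketch of the two ChL3PmcCfg encodings; not kernel code. */
  #include <stdio.h>
  #include <stdint.h>

  #define BIT_ULL(n)              (1ULL << (n))
  #define AMD64_L3_SLICE_SHIFT    48
  #define AMD64_L3_SLICE_MASK     (0xFULL << AMD64_L3_SLICE_SHIFT)
  #define AMD64_L3_THREAD_SHIFT   56
  #define AMD64_L3_EN_ALL_SLICES  BIT_ULL(46)
  #define AMD64_L3_COREID_SHIFT   42
  #define AMD64_L3_COREID_MASK    (0x7ULL << AMD64_L3_COREID_SHIFT)

  /* Family 17h: one thread bit per {core % 4, thread} pair, all slice bits set. */
  static uint64_t f17h_mask(unsigned int core, unsigned int thread)
  {
          return AMD64_L3_SLICE_MASK |
                 BIT_ULL(AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread);
  }

  /* Family 19h: encoded core number plus a separate thread mask bit. */
  static uint64_t f19h_mask(unsigned int core, unsigned int thread)
  {
          return AMD64_L3_EN_ALL_SLICES |
                 (((uint64_t)core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK) |
                 BIT_ULL(AMD64_L3_THREAD_SHIFT + thread);
  }

  int main(void)
  {
          /* e.g. core 5, SMT sibling thread */
          printf("F17h config bits: %#018llx\n", (unsigned long long)f17h_mask(5, 1));
          printf("F19h config bits: %#018llx\n", (unsigned long long)f19h_mask(5, 1));
          return 0;
  }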

Also update amd_uncore_init() to base its L2/NB vs. L3/Data Fabric mode
decision on Family 17h or above, not just on Families 17h and 18h: the
Family 19h Data Fabric PMC is compatible with the Family 17h DF PMC.
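
Also for illustration (not part of the patch), that decision reduces to a
single family check; the PMU name strings below assume the amd_nb/amd_df
and amd_l2/amd_l3 names the driver already exports:

  /* Sketch of the family-based uncore PMU naming choice; not kernel code. */
  #include <stdio.h>

  static void uncore_pmu_names(unsigned int family,
                               const char **nb_name, const char **llc_name)
  {
          if (family >= 0x17) {
                  /* NB counters repurposed as Data Fabric; L3 counters added */
                  *nb_name  = "amd_df";
                  *llc_name = "amd_l3";
          } else {
                  *nb_name  = "amd_nb";
                  *llc_name = "amd_l2";
          }
  }

  int main(void)
  {
          const char *nb, *llc;

          uncore_pmu_names(0x19, &nb, &llc);  /* Family 19h takes the DF/L3 path */
          printf("%s %s\n", nb, llc);
          return 0;
  }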

 [ bp: Touchups. ]

Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200313231024.17601-3-kim.phillips@amd.com
arch/x86/events/amd/uncore.c
arch/x86/include/asm/perf_event.h

index 07af497..46018e5 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -191,10 +191,18 @@ static u64 l3_thread_slice_mask(int cpu)
        if (topology_smt_supported() && !topology_is_primary_thread(cpu))
                thread = 1;
 
-       shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread;
+       if (boot_cpu_data.x86 <= 0x18) {
+               shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread;
+               thread_mask = BIT_ULL(shift);
+
+               return AMD64_L3_SLICE_MASK | thread_mask;
+       }
+
+       core = (core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK;
+       shift = AMD64_L3_THREAD_SHIFT + thread;
        thread_mask = BIT_ULL(shift);
 
-       return AMD64_L3_SLICE_MASK | thread_mask;
+       return AMD64_L3_EN_ALL_SLICES | core | thread_mask;
 }
 
 static int amd_uncore_event_init(struct perf_event *event)
@@ -223,8 +231,8 @@ static int amd_uncore_event_init(struct perf_event *event)
                return -EINVAL;
 
        /*
-        * SliceMask and ThreadMask need to be set for certain L3 events in
-        * Family 17h. For other events, the two fields do not affect the count.
+        * SliceMask and ThreadMask need to be set for certain L3 events.
+        * For other events, the two fields do not affect the count.
         */
        if (l3_mask && is_llc_event(event))
                hwc->config |= l3_thread_slice_mask(event->cpu);
@@ -533,9 +541,9 @@ static int __init amd_uncore_init(void)
        if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                return -ENODEV;
 
-       if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
+       if (boot_cpu_data.x86 >= 0x17) {
                /*
-                * For F17h or F18h, the Northbridge counters are
+                * For F17h and above, the Northbridge counters are
                 * repurposed as Data Fabric counters. Also, L3
                 * counters are supported too. The PMUs are exported
                 * based on family as either L2 or L3 and NB or DF.
index 29964b0..e855e9c 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
 
 #define AMD64_L3_SLICE_SHIFT                           48
 #define AMD64_L3_SLICE_MASK                            \
-       ((0xFULL) << AMD64_L3_SLICE_SHIFT)
+       (0xFULL << AMD64_L3_SLICE_SHIFT)
+#define AMD64_L3_SLICEID_MASK                          \
+       (0x7ULL << AMD64_L3_SLICE_SHIFT)
 
 #define AMD64_L3_THREAD_SHIFT                          56
 #define AMD64_L3_THREAD_MASK                           \
-       ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+       (0xFFULL << AMD64_L3_THREAD_SHIFT)
+#define AMD64_L3_F19H_THREAD_MASK                      \
+       (0x3ULL << AMD64_L3_THREAD_SHIFT)
+
+#define AMD64_L3_EN_ALL_CORES                          BIT_ULL(47)
+#define AMD64_L3_EN_ALL_SLICES                         BIT_ULL(46)
+
+#define AMD64_L3_COREID_SHIFT                          42
+#define AMD64_L3_COREID_MASK                           \
+       (0x7ULL << AMD64_L3_COREID_SHIFT)
 
 #define X86_RAW_EVENT_MASK             \
        (ARCH_PERFMON_EVENTSEL_EVENT |  \