perf/amd/uncore: Set all slices and threads to restore perf stat -a behaviour
[linux-2.6-microblaze.git] arch/x86/events/amd/uncore.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB         4
#define NUM_COUNTERS_L2         4
#define NUM_COUNTERS_L3         6
#define MAX_COUNTERS            6

#define RDPMC_BASE_NB           6
#define RDPMC_BASE_LLC          10

#define COUNTER_SHIFT           16

#undef pr_fmt
#define pr_fmt(fmt)     "amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

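/*
 * One amd_uncore object describes one NB/DF or LLC counter domain and is
 * shared, via the per-CPU pointer arrays below, by every CPU in that
 * domain: refcnt tracks the sharers, and the events[] slots serialize
 * counter allocation through cmpxchg().
 */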
struct amd_uncore {
        int id;
        int refcnt;
        int cpu;
        int num_counters;
        int rdpmc_base;
        u32 msr_base;
        cpumask_t *active_mask;
        struct pmu *pmu;
        struct perf_event *events[MAX_COUNTERS];
        struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
        return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
        return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
        if (is_nb_event(event) && amd_uncore_nb)
                return *per_cpu_ptr(amd_uncore_nb, event->cpu);
        else if (is_llc_event(event) && amd_uncore_llc)
                return *per_cpu_ptr(amd_uncore_llc, event->cpu);

        return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, new;
        s64 delta;

        /*
         * since we do not enable counter overflow interrupts,
         * we do not have to worry about prev_count changing on us
         */

        prev = local64_read(&hwc->prev_count);
        rdpmcl(hwc->event_base_rdpmc, new);
        local64_set(&hwc->prev_count, new);
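        /*
         * The counters are 48 bits wide: shifting both values up by
         * COUNTER_SHIFT (16) and the difference back down sign-extends
         * the 48-bit quantities, so the delta stays correct across a
         * counter wraparound.
         */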
        delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
        delta >>= COUNTER_SHIFT;
        local64_add(delta, &event->count);
}

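/*
 * On PERF_EF_RELOAD, the last snapshot in prev_count is written back to
 * the hardware counter, so the next amd_uncore_read() delta starts from
 * the value perf has already accounted.
 */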
static void amd_uncore_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (flags & PERF_EF_RELOAD)
                wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

        hwc->state = 0;
        wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
        perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
        hwc->state |= PERF_HES_STOPPED;

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                amd_uncore_read(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        /* are we already assigned? */
        if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
                goto out;

        for (i = 0; i < uncore->num_counters; i++) {
                if (uncore->events[i] == event) {
                        hwc->idx = i;
                        goto out;
                }
        }

        /* if not, take the first available counter */
        hwc->idx = -1;
        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
                        hwc->idx = i;
                        break;
                }
        }

out:
        if (hwc->idx == -1)
                return -EBUSY;

        hwc->config_base = uncore->msr_base + (2 * hwc->idx);
        hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
        hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                amd_uncore_start(event, PERF_EF_RELOAD);

        return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        amd_uncore_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], event, NULL) == event)
                        break;
        }

        hwc->idx = -1;
}

/*
 * Return a full thread and slice mask until per-CPU is
 * properly supported.
 */
static u64 l3_thread_slice_mask(void)
{
        if (boot_cpu_data.x86 <= 0x18)
                return AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK;

        return AMD64_L3_EN_ALL_SLICES | AMD64_L3_EN_ALL_CORES |
               AMD64_L3_F19H_THREAD_MASK;
}
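
/*
 * Layout note (per the AMD64_L3_* definitions in <asm/perf_event.h>): on
 * families up to 0x18, ThreadMask occupies bits 56-63 and SliceMask bits
 * 48-51 of the L3 PMC control register, so setting them all counts across
 * every slice and thread. Family 0x19 replaces them with EnAllSlices
 * (bit 46), EnAllCores (bit 47) and a two-bit thread mask (bits 56-57).
 */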

static int amd_uncore_event_init(struct perf_event *event)
{
        struct amd_uncore *uncore;
        struct hw_perf_event *hwc = &event->hw;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * NB and Last level cache counters (MSRs) are shared across all cores
         * that share the same NB / Last level cache. On family 16h and below,
         * interrupts can be directed to a single target core, but event counts
         * generated by processes running on other cores cannot be masked out.
         * So we do not support sampling and per-thread events via
         * PERF_PMU_CAP_NO_INTERRUPT, and we do not enable counter overflow
         * interrupts:
         */
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;

        if (event->cpu < 0)
                return -EINVAL;

        /*
         * SliceMask and ThreadMask need to be set for certain L3 events.
         * For other events, the two fields do not affect the count.
         */
        if (l3_mask && is_llc_event(event))
                hwc->config |= l3_thread_slice_mask();

        uncore = event_to_amd_uncore(event);
        if (!uncore)
                return -ENODEV;

        /*
         * Since a request can come in on any of the shared cores, remap the
         * event to a single common cpu.
         */
        event->cpu = uncore->cpu;

        return 0;
}
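
/*
 * Example (hypothetical event/umask values, for illustration only): once
 * the PMUs below are registered, a system-wide count such as
 *
 *   perf stat -a -e amd_l3/event=0x01,umask=0x80/ sleep 1
 *
 * is accepted from any CPU in the domain and accounted on the common CPU
 * chosen above.
 */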

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        cpumask_t *active_mask;
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu->type == amd_nb_pmu.type)
                active_mask = &amd_nb_active_mask;
        else if (pmu->type == amd_llc_pmu.type)
                active_mask = &amd_llc_active_mask;
        else
                return 0;

        return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group amd_uncore_attr_group = {
        .attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)                                \
static ssize_t                                                               \
_dev##_show##_name(struct device *dev,                                       \
                struct device_attribute *attr,                               \
                char *page)                                                  \
{                                                                            \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                          \
        return sprintf(page, _format "\n");                                  \
}                                                                            \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)                                                 \
static struct attribute *amd_uncore_format_attr_##_name[] = {                \
        &format_attr_event_##_name.attr,                                     \
        &format_attr_umask.attr,                                             \
        NULL,                                                                \
};                                                                           \
static struct attribute_group amd_uncore_format_group_##_name = {            \
        .name = "format",                                                    \
        .attrs = amd_uncore_format_attr_##_name,                             \
};                                                                           \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {    \
        &amd_uncore_attr_group,                                              \
        &amd_uncore_format_group_##_name,                                    \
        NULL,                                                                \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);
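
/*
 * The macros above expand into event_show{,_df,_l3}()/umask_show() and the
 * matching format_attr_* and attribute group objects, so each registered
 * PMU exports cpumask plus format/event and format/umask entries under
 * /sys/bus/event_source/devices/<pmu name>/.
 */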

static struct pmu amd_nb_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu amd_llc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
        return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
                        cpu_to_node(cpu));
}

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
        struct amd_uncore *uncore_nb = NULL, *uncore_llc;

        if (amd_uncore_nb) {
                uncore_nb = amd_uncore_alloc(cpu);
                if (!uncore_nb)
                        goto fail;
                uncore_nb->cpu = cpu;
                uncore_nb->num_counters = num_counters_nb;
                uncore_nb->rdpmc_base = RDPMC_BASE_NB;
                uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                uncore_nb->active_mask = &amd_nb_active_mask;
                uncore_nb->pmu = &amd_nb_pmu;
                uncore_nb->id = -1;
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }

        if (amd_uncore_llc) {
                uncore_llc = amd_uncore_alloc(cpu);
                if (!uncore_llc)
                        goto fail;
                uncore_llc->cpu = cpu;
                uncore_llc->num_counters = num_counters_llc;
                uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
                uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
                uncore_llc->active_mask = &amd_llc_active_mask;
                uncore_llc->pmu = &amd_llc_pmu;
                uncore_llc->id = -1;
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
        }

        return 0;

fail:
        if (amd_uncore_nb)
                *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
        kfree(uncore_nb);
        return -ENOMEM;
}

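/*
 * Called from the STARTING hotplug phase, where kfree() must not be used:
 * if another online CPU already carries an uncore with the same id, park
 * this CPU's private copy on uncore_unused_list (uncore_online() reaps it
 * later) and share the sibling's object instead. A reference is taken on
 * whichever object is returned.
 */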
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
                               struct amd_uncore * __percpu *uncores)
{
        unsigned int cpu;
        struct amd_uncore *that;

        for_each_online_cpu(cpu) {
                that = *per_cpu_ptr(uncores, cpu);

                if (!that)
                        continue;

                if (this == that)
                        continue;

                if (this->id == that->id) {
                        hlist_add_head(&this->node, &uncore_unused_list);
                        this = that;
                        break;
                }
        }

        this->refcnt++;
        return this;
}

static int amd_uncore_cpu_starting(unsigned int cpu)
{
        unsigned int eax, ebx, ecx, edx;
        struct amd_uncore *uncore;

        if (amd_uncore_nb) {
                uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
                uncore->id = ecx & 0xff;

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
        }

        if (amd_uncore_llc) {
                uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
                uncore->id = per_cpu(cpu_llc_id, cpu);

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
        }

        return 0;
}

static void uncore_clean_online(void)
{
        struct amd_uncore *uncore;
        struct hlist_node *n;

        hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
                hlist_del(&uncore->node);
                kfree(uncore);
        }
}

static void uncore_online(unsigned int cpu,
                          struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        uncore_clean_online();

        if (cpu == uncore->cpu)
                cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_online(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_online(cpu, amd_uncore_llc);

        return 0;
}

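/*
 * Before @cpu goes offline: if it owns the shared uncore, migrate the perf
 * context and ownership to another online CPU in the same domain so that
 * counting continues uninterrupted.
 */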
static void uncore_down_prepare(unsigned int cpu,
                                struct amd_uncore * __percpu *uncores)
{
        unsigned int i;
        struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

        if (this->cpu != cpu)
                return;

        /* this cpu is going down, migrate to a shared sibling if possible */
        for_each_online_cpu(i) {
                struct amd_uncore *that = *per_cpu_ptr(uncores, i);

                if (cpu == i)
                        continue;

                if (this == that) {
                        perf_pmu_migrate_context(this->pmu, cpu, i);
                        cpumask_clear_cpu(cpu, that->active_mask);
                        cpumask_set_cpu(i, that->active_mask);
                        that->cpu = i;
                        break;
                }
        }
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_down_prepare(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_down_prepare(cpu, amd_uncore_llc);

        return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        if (cpu == uncore->cpu)
                cpumask_clear_cpu(cpu, uncore->active_mask);

        if (!--uncore->refcnt)
                kfree(uncore);
        *per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_dead(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_dead(cpu, amd_uncore_llc);

        return 0;
}

static int __init amd_uncore_init(void)
{
        int ret = -ENODEV;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return -ENODEV;

        if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                return -ENODEV;

        if (boot_cpu_data.x86 >= 0x17) {
                /*
                 * For F17h and above, the Northbridge counters are
                 * repurposed as Data Fabric counters. L3 counters are
                 * supported too. The PMUs are exported based on family
                 * as either L2 or L3 and NB or DF.
                 */
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L3;
                amd_nb_pmu.name           = "amd_df";
                amd_llc_pmu.name          = "amd_l3";
                format_attr_event_df.show = &event_show_df;
                format_attr_event_l3.show = &event_show_l3;
                l3_mask                   = true;
        } else {
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L2;
                amd_nb_pmu.name           = "amd_nb";
                amd_llc_pmu.name          = "amd_l2";
                format_attr_event_df      = format_attr_event;
                format_attr_event_l3      = format_attr_event;
                l3_mask                   = false;
        }

        amd_nb_pmu.attr_groups  = amd_uncore_attr_groups_df;
        amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
                amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_nb) {
                        ret = -ENOMEM;
                        goto fail_nb;
                }
                ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
                if (ret)
                        goto fail_nb;

                pr_info("%s NB counters detected\n",
                        boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
                                "HYGON" : "AMD");
                ret = 0;
        }

        if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
                amd_uncore_llc = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_llc) {
                        ret = -ENOMEM;
                        goto fail_llc;
                }
                ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
                if (ret)
                        goto fail_llc;

                pr_info("%s LLC counters detected\n",
                        boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
                                "HYGON" : "AMD");
                ret = 0;
        }

        /*
         * Install callbacks. Core will call them for each online cpu.
         */
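        /*
         * Ordering matters: the PREP callback allocates the per-CPU
         * structures, STARTING fills in the ids and folds domain siblings
         * together, and ONLINE reaps replaced copies and updates the
         * active cpumask. Teardown runs in reverse: down_prepare migrates
         * the perf context to a surviving sibling and dead drops the
         * reference, freeing the last copy.
         */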
        if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
                              "perf/x86/amd/uncore:prepare",
                              amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
                goto fail_llc;

        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
                              "perf/x86/amd/uncore:starting",
                              amd_uncore_cpu_starting, NULL))
                goto fail_prep;
        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
                              "perf/x86/amd/uncore:online",
                              amd_uncore_cpu_online,
                              amd_uncore_cpu_down_prepare))
                goto fail_start;
        return 0;

fail_start:
        cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
        cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
        if (amd_uncore_llc)
                free_percpu(amd_uncore_llc);
fail_nb:
        if (amd_uncore_nb)
                free_percpu(amd_uncore_nb);

        return ret;
}
device_initcall(amd_uncore_init);