arch/x86/events/amd/uncore.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB         4
#define NUM_COUNTERS_L2         4
#define NUM_COUNTERS_L3         6
#define MAX_COUNTERS            6

#define RDPMC_BASE_NB           6
#define RDPMC_BASE_LLC          10

#define COUNTER_SHIFT           16

#undef pr_fmt
#define pr_fmt(fmt)     "amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

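/*
 * One instance of this structure represents a shared NB/DF or LLC uncore
 * PMU. Several CPUs that share the same unit point at the same instance;
 * it is refcounted and owned by the CPU recorded in ->cpu.
 */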
struct amd_uncore {
        int id;
        int refcnt;
        int cpu;
        int num_counters;
        int rdpmc_base;
        u32 msr_base;
        cpumask_t *active_mask;
        struct pmu *pmu;
        struct perf_event *events[MAX_COUNTERS];
        struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
        return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
        return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
        if (is_nb_event(event) && amd_uncore_nb)
                return *per_cpu_ptr(amd_uncore_nb, event->cpu);
        else if (is_llc_event(event) && amd_uncore_llc)
                return *per_cpu_ptr(amd_uncore_llc, event->cpu);

        return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, new;
        s64 delta;

        /*
         * since we do not enable counter overflow interrupts,
         * we do not have to worry about prev_count changing on us
         */

        prev = local64_read(&hwc->prev_count);
        rdpmcl(hwc->event_base_rdpmc, new);
        local64_set(&hwc->prev_count, new);
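        /*
         * The hardware counters are 48 bits wide (64 - COUNTER_SHIFT).
         * Shifting the raw values left by COUNTER_SHIFT and the difference
         * back right again sign-extends the delta, so counter wraparound
         * is accounted for correctly.
         */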
        delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
        delta >>= COUNTER_SHIFT;
        local64_add(delta, &event->count);
}

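/*
 * Start counting: optionally reload the saved count into the counter MSR
 * (PERF_EF_RELOAD), then set the enable bit in the event select MSR.
 */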
static void amd_uncore_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (flags & PERF_EF_RELOAD)
                wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

        hwc->state = 0;
        wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
        perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
        hwc->state |= PERF_HES_STOPPED;

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                amd_uncore_read(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

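/*
 * Claim a counter slot on the shared uncore for this event: reuse an
 * existing assignment if there is one, otherwise grab the first free slot
 * with cmpxchg() since other CPUs sharing the uncore may race with us.
 */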
static int amd_uncore_add(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        /* are we already assigned? */
        if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
                goto out;

        for (i = 0; i < uncore->num_counters; i++) {
                if (uncore->events[i] == event) {
                        hwc->idx = i;
                        goto out;
                }
        }

        /* if not, take the first available counter */
        hwc->idx = -1;
        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
                        hwc->idx = i;
                        break;
                }
        }

out:
        if (hwc->idx == -1)
                return -EBUSY;

        hwc->config_base = uncore->msr_base + (2 * hwc->idx);
        hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
        hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                amd_uncore_start(event, PERF_EF_RELOAD);

        return 0;
}

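/*
 * Stop the event, fold in its final count and release the counter slot it
 * occupied on the shared uncore.
 */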
static void amd_uncore_del(struct perf_event *event, int flags)
{
        int i;
        struct amd_uncore *uncore = event_to_amd_uncore(event);
        struct hw_perf_event *hwc = &event->hw;

        amd_uncore_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < uncore->num_counters; i++) {
                if (cmpxchg(&uncore->events[i], event, NULL) == event)
                        break;
        }

        hwc->idx = -1;
}

/*
 * Return a full thread and slice mask until per-CPU is
 * properly supported.
 */
static u64 l3_thread_slice_mask(void)
{
        if (boot_cpu_data.x86 <= 0x18)
                return AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK;

        return AMD64_L3_EN_ALL_SLICES | AMD64_L3_EN_ALL_CORES |
               AMD64_L3_F19H_THREAD_MASK;
}

static int amd_uncore_event_init(struct perf_event *event)
{
        struct amd_uncore *uncore;
        struct hw_perf_event *hwc = &event->hw;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * NB and Last level cache counters (MSRs) are shared across all cores
         * that share the same NB / Last level cache.  On family 16h and below,
         * interrupts can be directed to a single target core, however, event
         * counts generated by processes running on other cores cannot be masked
         * out. So we do not support sampling and per-thread events via
         * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
         */
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;

        if (event->cpu < 0)
                return -EINVAL;

        /*
         * SliceMask and ThreadMask need to be set for certain L3 events.
         * For other events, the two fields do not affect the count.
         */
        if (l3_mask && is_llc_event(event))
                hwc->config |= l3_thread_slice_mask();

        uncore = event_to_amd_uncore(event);
        if (!uncore)
                return -ENODEV;

        /*
         * since requests can come in on any of the shared cores, we will remap
         * to a single common cpu.
         */
        event->cpu = uncore->cpu;

        return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        cpumask_t *active_mask;
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu->type == amd_nb_pmu.type)
                active_mask = &amd_nb_active_mask;
        else if (pmu->type == amd_llc_pmu.type)
                active_mask = &amd_llc_active_mask;
        else
                return 0;

        return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group amd_uncore_attr_group = {
        .attrs = amd_uncore_attrs,
};

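/*
 * Helper that defines the read-only sysfs "format" attributes describing
 * how the event and umask fields are laid out in perf_event_attr::config.
 */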
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                 \
static ssize_t __uncore_##_var##_show(struct kobject *kobj,             \
                                struct kobj_attribute *attr,            \
                                char *page)                             \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
static struct kobj_attribute format_attr_##_var =                       \
        __ATTR(_name, 0444, __uncore_##_var##_show, NULL)

DEFINE_UNCORE_FORMAT_ATTR(event12,      event,          "config:0-7,32-35");
DEFINE_UNCORE_FORMAT_ATTR(event14,      event,          "config:0-7,32-35,59-60"); /* F17h+ DF */
DEFINE_UNCORE_FORMAT_ATTR(event8,       event,          "config:0-7");             /* F17h+ L3 */
DEFINE_UNCORE_FORMAT_ATTR(umask,        umask,          "config:8-15");

static struct attribute *amd_uncore_df_format_attr[] = {
        &format_attr_event12.attr, /* event14 if F17h+ */
        &format_attr_umask.attr,
        NULL,
};

static struct attribute *amd_uncore_l3_format_attr[] = {
        &format_attr_event12.attr, /* event8 if F17h+ */
        &format_attr_umask.attr,
        NULL,
};

static struct attribute_group amd_uncore_df_format_group = {
        .name = "format",
        .attrs = amd_uncore_df_format_attr,
};

static struct attribute_group amd_uncore_l3_format_group = {
        .name = "format",
        .attrs = amd_uncore_l3_format_attr,
};

static const struct attribute_group *amd_uncore_df_attr_groups[] = {
        &amd_uncore_attr_group,
        &amd_uncore_df_format_group,
        NULL,
};

static const struct attribute_group *amd_uncore_l3_attr_groups[] = {
        &amd_uncore_attr_group,
        &amd_uncore_l3_format_group,
        NULL,
};

static struct pmu amd_nb_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .attr_groups    = amd_uncore_df_attr_groups,
        .name           = "amd_nb",
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu amd_llc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .attr_groups    = amd_uncore_l3_attr_groups,
        .name           = "amd_l2",
        .event_init     = amd_uncore_event_init,
        .add            = amd_uncore_add,
        .del            = amd_uncore_del,
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
        return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
                        cpu_to_node(cpu));
}

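/*
 * CPU hotplug "prepare" callback: allocate the per-CPU NB/DF and LLC uncore
 * structures for an incoming CPU before it starts running.
 */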
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
        struct amd_uncore *uncore_nb = NULL, *uncore_llc;

        if (amd_uncore_nb) {
                uncore_nb = amd_uncore_alloc(cpu);
                if (!uncore_nb)
                        goto fail;
                uncore_nb->cpu = cpu;
                uncore_nb->num_counters = num_counters_nb;
                uncore_nb->rdpmc_base = RDPMC_BASE_NB;
                uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                uncore_nb->active_mask = &amd_nb_active_mask;
                uncore_nb->pmu = &amd_nb_pmu;
                uncore_nb->id = -1;
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }

        if (amd_uncore_llc) {
                uncore_llc = amd_uncore_alloc(cpu);
                if (!uncore_llc)
                        goto fail;
                uncore_llc->cpu = cpu;
                uncore_llc->num_counters = num_counters_llc;
                uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
                uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
                uncore_llc->active_mask = &amd_llc_active_mask;
                uncore_llc->pmu = &amd_llc_pmu;
                uncore_llc->id = -1;
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
        }

        return 0;

fail:
        if (amd_uncore_nb)
                *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
        kfree(uncore_nb);
        return -ENOMEM;
}

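/*
 * If another online CPU already owns an uncore instance with the same id,
 * share that instance and queue our own copy on uncore_unused_list so it
 * can be freed later; either way, take a reference on the result.
 */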
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
                               struct amd_uncore * __percpu *uncores)
{
        unsigned int cpu;
        struct amd_uncore *that;

        for_each_online_cpu(cpu) {
                that = *per_cpu_ptr(uncores, cpu);

                if (!that)
                        continue;

                if (this == that)
                        continue;

                if (this->id == that->id) {
                        hlist_add_head(&this->node, &uncore_unused_list);
                        this = that;
                        break;
                }
        }

        this->refcnt++;
        return this;
}

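/*
 * CPU hotplug "starting" callback: determine the NB id (from CPUID leaf
 * 0x8000001e) and the LLC id for this CPU, then merge with any sibling
 * that already represents the same shared unit.
 */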
static int amd_uncore_cpu_starting(unsigned int cpu)
{
        unsigned int eax, ebx, ecx, edx;
        struct amd_uncore *uncore;

        if (amd_uncore_nb) {
                uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
                uncore->id = ecx & 0xff;

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
        }

        if (amd_uncore_llc) {
                uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
                uncore->id = per_cpu(cpu_llc_id, cpu);

                uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
                *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
        }

        return 0;
}

static void uncore_clean_online(void)
{
        struct amd_uncore *uncore;
        struct hlist_node *n;

        hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
                hlist_del(&uncore->node);
                kfree(uncore);
        }
}

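/*
 * CPU hotplug "online" helper: free any uncore copies superseded during the
 * starting phase and, if this CPU owns the shared instance, record it in
 * the PMU's active cpumask.
 */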
static void uncore_online(unsigned int cpu,
                          struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        uncore_clean_online();

        if (cpu == uncore->cpu)
                cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_online(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_online(cpu, amd_uncore_llc);

        return 0;
}

static void uncore_down_prepare(unsigned int cpu,
                                struct amd_uncore * __percpu *uncores)
{
        unsigned int i;
        struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

        if (this->cpu != cpu)
                return;

        /* this cpu is going down, migrate to a shared sibling if possible */
        for_each_online_cpu(i) {
                struct amd_uncore *that = *per_cpu_ptr(uncores, i);

                if (cpu == i)
                        continue;

                if (this == that) {
                        perf_pmu_migrate_context(this->pmu, cpu, i);
                        cpumask_clear_cpu(cpu, that->active_mask);
                        cpumask_set_cpu(i, that->active_mask);
                        that->cpu = i;
                        break;
                }
        }
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_down_prepare(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_down_prepare(cpu, amd_uncore_llc);

        return 0;
}

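/*
 * CPU hotplug "dead" helper: drop this CPU's reference on the shared uncore
 * instance and free it once the last sharer is gone.
 */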
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

        if (cpu == uncore->cpu)
                cpumask_clear_cpu(cpu, uncore->active_mask);

        if (!--uncore->refcnt)
                kfree(uncore);
        *per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
        if (amd_uncore_nb)
                uncore_dead(cpu, amd_uncore_nb);

        if (amd_uncore_llc)
                uncore_dead(cpu, amd_uncore_llc);

        return 0;
}

static int __init amd_uncore_init(void)
{
        struct attribute **df_attr = amd_uncore_df_format_attr;
        struct attribute **l3_attr = amd_uncore_l3_format_attr;
        int ret = -ENODEV;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return -ENODEV;

        if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
                return -ENODEV;

        num_counters_nb = NUM_COUNTERS_NB;
        num_counters_llc = NUM_COUNTERS_L2;
        if (boot_cpu_data.x86 >= 0x17) {
                /*
                 * For F17h and above, the Northbridge counters are
                 * repurposed as Data Fabric counters. L3 counters are
                 * also supported. The PMUs are exported based on
                 * family as either L2 or L3 and NB or DF.
                 */
                num_counters_llc          = NUM_COUNTERS_L3;
                amd_nb_pmu.name           = "amd_df";
                amd_llc_pmu.name          = "amd_l3";
                l3_mask                   = true;
        }

        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
                if (boot_cpu_data.x86 >= 0x17)
                        *df_attr = &format_attr_event14.attr;

                amd_uncore_nb = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_nb) {
                        ret = -ENOMEM;
                        goto fail_nb;
                }
                ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
                if (ret)
                        goto fail_nb;

                pr_info("%s NB counters detected\n",
                        boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
                                "HYGON" : "AMD");
                ret = 0;
        }

        if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
                if (boot_cpu_data.x86 >= 0x17)
                        *l3_attr = &format_attr_event8.attr;

                amd_uncore_llc = alloc_percpu(struct amd_uncore *);
                if (!amd_uncore_llc) {
                        ret = -ENOMEM;
                        goto fail_llc;
                }
                ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
                if (ret)
                        goto fail_llc;

                pr_info("%s LLC counters detected\n",
                        boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
                                "HYGON" : "AMD");
                ret = 0;
        }

        /*
         * Install callbacks. Core will call them for each online cpu.
         */
        if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
                              "perf/x86/amd/uncore:prepare",
                              amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
                goto fail_llc;

        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
                              "perf/x86/amd/uncore:starting",
                              amd_uncore_cpu_starting, NULL))
                goto fail_prep;
        if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
                              "perf/x86/amd/uncore:online",
                              amd_uncore_cpu_online,
                              amd_uncore_cpu_down_prepare))
                goto fail_start;
        return 0;

fail_start:
        cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
        cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
        if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
                perf_pmu_unregister(&amd_nb_pmu);
        if (amd_uncore_llc)
                free_percpu(amd_uncore_llc);
fail_nb:
        if (amd_uncore_nb)
                free_percpu(amd_uncore_nb);

        return ret;
}
device_initcall(amd_uncore_init);