arch/x86/events/intel/cstate.c
/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and tools can use them without special MSR access
 * code.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *      MSR_CORE_C1_RES: CORE C1 Residency Counter
 *                       perf code: 0x00
 *                       Available models: SLM,AMT,GLM,CNL,TNT
 *                       Scope: Core (each processor core has a MSR)
 *      MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *                             perf code: 0x01
 *                             Available models: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
 *                                               CNL,KBL,CML,TNT
 *                             Scope: Core
 *      MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *                             perf code: 0x02
 *                             Available models: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *                                               SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
 *                                               TNT
 *                             Scope: Core
 *      MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *                             perf code: 0x03
 *                             Available models: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
 *                                               ICL,TGL
 *                             Scope: Core
 *      MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
 *                             perf code: 0x00
 *                             Available models: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
 *                                               KBL,CML,ICL,TGL,TNT
 *                             Scope: Package (physical package)
 *      MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
 *                             perf code: 0x01
 *                             Available models: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
 *                                               GLM,CNL,KBL,CML,ICL,TGL,TNT
 *                             Scope: Package (physical package)
 *      MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
 *                             perf code: 0x02
 *                             Available models: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *                                               SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
 *                                               TNT
 *                             Scope: Package (physical package)
 *      MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
 *                             perf code: 0x03
 *                             Available models: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
 *                                               KBL,CML,ICL,TGL
 *                             Scope: Package (physical package)
 *      MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *                             perf code: 0x04
 *                             Available models: HSW ULT,KBL,CNL,CML,ICL,TGL
 *                             Scope: Package (physical package)
 *      MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *                             perf code: 0x05
 *                             Available models: HSW ULT,KBL,CNL,CML,ICL,TGL
 *                             Scope: Package (physical package)
 *      MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *                             perf code: 0x06
 *                             Available models: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
 *                                               TNT
 *                             Scope: Package (physical package)
 *
 */
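
/*
 * Example usage (sketch): since these are counting-only, system-wide
 * events, they are read with "perf stat" style counting, e.g.:
 *
 *   perf stat -a -e cstate_core/c6-residency/ -- sleep 1
 *   perf stat -a -e cstate_pkg/c2-residency/ -- sleep 1
 *
 * The PMU and event names match what is registered below via
 * perf_pmu_register() and PMU_EVENT_ATTR_STRING().
 */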

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"

MODULE_LICENSE("GPL");

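/*
 * Generate a sysfs show() method plus the kobj_attribute for one entry
 * in the PMU's "format" directory; it simply prints a fixed string such
 * as "config:0-63" describing how attr.config is laid out.
 */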
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)         \
static ssize_t __cstate_##_var##_show(struct kobject *kobj,     \
                                struct kobj_attribute *attr,    \
                                char *page)                     \
{                                                               \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);             \
        return sprintf(page, _format "\n");                     \
}                                                               \
static struct kobj_attribute format_attr_##_var =               \
        __ATTR(_name, 0444, __cstate_##_var##_show, NULL)

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf);

/* Model -> events mapping */
struct cstate_model {
        unsigned long           core_events;
        unsigned long           pkg_events;
        unsigned long           quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR   (1UL << 0)
#define KNL_CORE_C6_MSR         (1UL << 1)

struct perf_cstate_msr {
        u64     msr;
        struct  perf_pmu_events_attr *attr;
};


/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
        PERF_CSTATE_CORE_C1_RES = 0,
        PERF_CSTATE_CORE_C3_RES,
        PERF_CSTATE_CORE_C6_RES,
        PERF_CSTATE_CORE_C7_RES,

        PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");

static unsigned long core_msr_mask;

PMU_EVENT_GROUP(events, cstate_core_c1);
PMU_EVENT_GROUP(events, cstate_core_c3);
PMU_EVENT_GROUP(events, cstate_core_c6);
PMU_EVENT_GROUP(events, cstate_core_c7);

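/*
 * Probe callback for perf_msr_probe(): @data is the detected model's
 * event bitmap (cstate_model.core_events/pkg_events), so an MSR is
 * only kept if the matching bit is set for this CPU model.
 */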
static bool test_msr(int idx, void *data)
{
        return test_bit(idx, (unsigned long *) data);
}

static struct perf_msr core_msr[] = {
        [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,          &group_cstate_core_c1,  test_msr },
        [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,    &group_cstate_core_c3,  test_msr },
        [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,    &group_cstate_core_c6,  test_msr },
        [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,    &group_cstate_core_c7,  test_msr },
};

static struct attribute *attrs_empty[] = {
        NULL,
};

/*
 * There are no default events, but we need to create
 * "events" group (with empty attrs) before updating
 * it with detected events.
 */
static struct attribute_group core_events_attr_group = {
        .name = "events",
        .attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
        &format_attr_core_event.attr,
        NULL,
};

static struct attribute_group core_format_attr_group = {
        .name = "format",
        .attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group cpumask_attr_group = {
        .attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
        &core_events_attr_group,
        &core_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
        PERF_CSTATE_PKG_C2_RES = 0,
        PERF_CSTATE_PKG_C3_RES,
        PERF_CSTATE_PKG_C6_RES,
        PERF_CSTATE_PKG_C7_RES,
        PERF_CSTATE_PKG_C8_RES,
        PERF_CSTATE_PKG_C9_RES,
        PERF_CSTATE_PKG_C10_RES,

        PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency,  attr_cstate_pkg_c2,  "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency,  attr_cstate_pkg_c3,  "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency,  attr_cstate_pkg_c6,  "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency,  attr_cstate_pkg_c7,  "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency,  attr_cstate_pkg_c8,  "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency,  attr_cstate_pkg_c9,  "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");

static unsigned long pkg_msr_mask;

PMU_EVENT_GROUP(events, cstate_pkg_c2);
PMU_EVENT_GROUP(events, cstate_pkg_c3);
PMU_EVENT_GROUP(events, cstate_pkg_c6);
PMU_EVENT_GROUP(events, cstate_pkg_c7);
PMU_EVENT_GROUP(events, cstate_pkg_c8);
PMU_EVENT_GROUP(events, cstate_pkg_c9);
PMU_EVENT_GROUP(events, cstate_pkg_c10);

static struct perf_msr pkg_msr[] = {
        [PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,     &group_cstate_pkg_c2,   test_msr },
        [PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,     &group_cstate_pkg_c3,   test_msr },
        [PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,     &group_cstate_pkg_c6,   test_msr },
        [PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,     &group_cstate_pkg_c7,   test_msr },
        [PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,     &group_cstate_pkg_c8,   test_msr },
        [PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,     &group_cstate_pkg_c9,   test_msr },
        [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,    &group_cstate_pkg_c10,  test_msr },
};

static struct attribute_group pkg_events_attr_group = {
        .name = "events",
        .attrs = attrs_empty,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
        &format_attr_pkg_event.attr,
        NULL,
};
static struct attribute_group pkg_format_attr_group = {
        .name = "format",
        .attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
        &pkg_events_attr_group,
        &pkg_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu == &cstate_core_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
        else if (pmu == &cstate_pkg_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
        else
                return 0;
}

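/*
 * Event initialization: validate the requested config, map it to a
 * residency MSR, and redirect event->cpu to the designated reader CPU
 * of the target core/package, since the counters are per core/package
 * rather than per CPU.
 */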
static int cstate_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config;
        int cpu;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        if (event->pmu == &cstate_core_pmu) {
                if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
                        return -EINVAL;
                cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
                if (!(core_msr_mask & (1 << cfg)))
                        return -EINVAL;
                event->hw.event_base = core_msr[cfg].msr;
                cpu = cpumask_any_and(&cstate_core_cpu_mask,
                                      topology_sibling_cpumask(event->cpu));
        } else if (event->pmu == &cstate_pkg_pmu) {
                if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
                        return -EINVAL;
                cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
                if (!(pkg_msr_mask & (1 << cfg)))
                        return -EINVAL;
                event->hw.event_base = pkg_msr[cfg].msr;
                cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
                                      topology_die_cpumask(event->cpu));
        } else {
                return -ENOENT;
        }

        if (cpu >= nr_cpu_ids)
                return -ENODEV;

        event->cpu = cpu;
        event->hw.config = cfg;
        event->hw.idx = -1;
        return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
        u64 val;

        rdmsrl(event->hw.event_base, val);
        return val;
}

static void cstate_pmu_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;

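        /*
         * The counter is free-running and cannot be written, so
         * accumulate the delta since the previous read instead. The
         * cmpxchg retry loop keeps prev_count consistent if another
         * context updates it concurrently.
         */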
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = cstate_pmu_read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        local64_add(new_raw_count - prev_raw_count, &event->count);
}

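/*
 * The hardware cannot be started or stopped: start merely snapshots the
 * current MSR value; stop folds the delta accumulated since then into
 * event->count via cstate_pmu_event_update().
 */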
static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
        local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
        cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
        cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
        if (mode & PERF_EF_START)
                cstate_pmu_event_start(event, mode);

        return 0;
}

/*
 * Check if the exiting CPU is the designated reader. If so, migrate the
 * events to a valid target when one is available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
        unsigned int target;

        if (has_cstate_core &&
            cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

                target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &cstate_core_cpu_mask);
                        perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
                }
        }

        if (has_cstate_pkg &&
            cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

                target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
                        perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
                }
        }
        return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
        unsigned int target;

        /*
         * If this is the first online thread of that core, set it in
         * the core cpu mask as the designated reader.
         */
        target = cpumask_any_and(&cstate_core_cpu_mask,
                                 topology_sibling_cpumask(cpu));

        if (has_cstate_core && target >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

        /*
         * If this is the first online thread of that package, set it
         * in the package cpu mask as the designated reader.
         */
        target = cpumask_any_and(&cstate_pkg_cpu_mask,
                                 topology_die_cpumask(cpu));
        if (has_cstate_pkg && target >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

        return 0;
}

static const struct attribute_group *core_attr_update[] = {
        &group_cstate_core_c1,
        &group_cstate_core_c3,
        &group_cstate_core_c6,
        &group_cstate_core_c7,
        NULL,
};

static const struct attribute_group *pkg_attr_update[] = {
        &group_cstate_pkg_c2,
        &group_cstate_pkg_c3,
        &group_cstate_pkg_c6,
        &group_cstate_pkg_c7,
        &group_cstate_pkg_c8,
        &group_cstate_pkg_c9,
        &group_cstate_pkg_c10,
        NULL,
};

static struct pmu cstate_core_pmu = {
        .attr_groups    = core_attr_groups,
        .attr_update    = core_attr_update,
        .name           = "cstate_core",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add,
        .del            = cstate_pmu_event_del,
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
        .module         = THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
        .attr_groups    = pkg_attr_groups,
        .attr_update    = pkg_attr_update,
        .name           = "cstate_pkg",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add,
        .del            = cstate_pmu_event_del,
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
        .module         = THIS_MODULE,
};

static const struct cstate_model nhm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES) |
                                  BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES) |
                                  BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES) |
                                  BIT(PERF_CSTATE_PKG_C8_RES) |
                                  BIT(PERF_CSTATE_PKG_C9_RES) |
                                  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model cnl_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C1_RES) |
                                  BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES) |
                                  BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES) |
                                  BIT(PERF_CSTATE_PKG_C8_RES) |
                                  BIT(PERF_CSTATE_PKG_C9_RES) |
                                  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model icl_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C6_RES) |
                                  BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES) |
                                  BIT(PERF_CSTATE_PKG_C8_RES) |
                                  BIT(PERF_CSTATE_PKG_C9_RES) |
                                  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C1_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C6_RES),
        .quirks                 = SLM_PKG_C6_USE_C7_MSR,
};


static const struct cstate_model knl_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES),
        .quirks                 = KNL_CORE_C6_MSR,
};


static const struct cstate_model glm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C1_RES) |
                                  BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C10_RES),
};


static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,             &nhm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,          &nhm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,          &nhm_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,            &nhm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,         &nhm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,         &nhm_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,         &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,       &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,           &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,         &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(HASWELL,             &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,           &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,           &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,           &hswult_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,     &slm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,   &slm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,        &slm_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,           &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,         &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,         &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,         &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,           &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,             &snb_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,           &snb_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,          &hswult_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,            &hswult_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,         &hswult_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,           &hswult_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,        &cnl_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,        &knl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,        &knl_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,       &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,     &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,  &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,        &glm_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      &glm_cstates),

        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,           &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,             &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           &icl_cstates),
        { },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

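/* Apply model quirks, then probe which residency MSRs are readable. */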
static int __init cstate_probe(const struct cstate_model *cm)
{
        /* SLM has different MSR for PKG C6 */
        if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
                pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

        /* KNL has different MSR for CORE C6 */
        if (cm->quirks & KNL_CORE_C6_MSR)
                pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
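        /*
         * Note: pkg_msr is indexed with a core event constant above;
         * PERF_CSTATE_CORE_C6_RES and PERF_CSTATE_PKG_C6_RES both
         * resolve to index 2, so this rewrites the C6 slot of pkg_msr[].
         */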


        core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
                                       true, (void *) &cm->core_events);

        pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
                                      true, (void *) &cm->pkg_events);

        has_cstate_core = !!core_msr_mask;
        has_cstate_pkg  = !!pkg_msr_mask;

        return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

        if (has_cstate_core)
                perf_pmu_unregister(&cstate_core_pmu);

        if (has_cstate_pkg)
                perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
        int err;

        cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
                          "perf/x86/cstate:starting", cstate_cpu_init, NULL);
        cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
                          "perf/x86/cstate:online", NULL, cstate_cpu_exit);

        if (has_cstate_core) {
                err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
                if (err) {
                        has_cstate_core = false;
                        pr_info("Failed to register cstate core pmu\n");
                        cstate_cleanup();
                        return err;
                }
        }

        if (has_cstate_pkg) {
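                /*
                 * The counters are per die on multi-die packages, so
                 * register the PMU as "cstate_die" there to reflect its
                 * actual scope.
                 */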
                if (topology_max_die_per_package() > 1) {
                        err = perf_pmu_register(&cstate_pkg_pmu,
                                                "cstate_die", -1);
                } else {
                        err = perf_pmu_register(&cstate_pkg_pmu,
                                                cstate_pkg_pmu.name, -1);
                }
                if (err) {
                        has_cstate_pkg = false;
                        pr_info("Failed to register cstate pkg pmu\n");
                        cstate_cleanup();
                        return err;
                }
        }
        return 0;
}

static int __init cstate_pmu_init(void)
{
        const struct x86_cpu_id *id;
        int err;

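        /* These model-specific residency MSRs are typically not available in VMs */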
        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return -ENODEV;

        id = x86_match_cpu(intel_cstates_match);
        if (!id)
                return -ENODEV;

        err = cstate_probe((const struct cstate_model *) id->driver_data);
        if (err)
                return err;

        return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
        cstate_cleanup();
}
module_exit(cstate_pmu_exit);