// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support Intel RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013)
 *
 * RAPL provides more controls than just reporting energy consumption,
 * however here we only expose the energy consumption free running
 * counters (pp0, pkg, dram, gpu, psys).
 *
 * Each of those counters increments in a power unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules.
 *
 * Counter to rapl events mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 *	  event: rapl_energy_cores
 *
 *  pkg counter: consumption of the whole processor package
 *	  event: rapl_energy_pkg
 *
 * dram counter: consumption of the dram domain (servers only)
 *	  event: rapl_energy_dram
 *
 *  gpu counter: consumption of the builtin-gpu domain (client only)
 *	  event: rapl_energy_gpu
 *
 * psys counter: consumption of the builtin-psys domain (client only)
 *	  event: rapl_energy_psys
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32).
 * Tools must convert the counts to Joules using ldexp(raw_count, -32)
 * and divide by the duration of the measurement to obtain Watts.
 */
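
/*
 * Userspace example (not part of this driver): the "power" PMU and its
 * per-domain events can be read with, e.g.,
 *
 *	perf stat -a -e power/energy-pkg/ -- sleep 1
 *
 * A raw 32.32 fixed point count converts to Joules as ldexp(raw_count, -32);
 * the per-event "scale" attribute below exports the same factor
 * (2^-32 ~= 2.33e-10), so the perf tool reports Joules directly.
 */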
#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
#include "../probe.h"	/* perf_msr_probe(), struct perf_msr */

MODULE_LICENSE("GPL");
/*
 * RAPL energy status counters
 */
#define RAPL_IDX_PP0_NRG_STAT	0	/* all cores */
#define INTEL_RAPL_PP0		0x1	/* pseudo-encoding */
#define RAPL_IDX_PKG_NRG_STAT	1	/* entire package */
#define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
#define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
#define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
#define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
#define RAPL_IDX_PSYS_NRG_STAT	4	/* psys */
#define INTEL_RAPL_PSYS		0x5	/* pseudo-encoding */

#define NR_RAPL_DOMAINS		0x5
enum perf_rapl_events {
	PERF_RAPL_PP0 = 0,	/* all cores */
	PERF_RAPL_PKG,		/* entire package */
	PERF_RAPL_RAM,		/* DRAM */
	PERF_RAPL_PP1,		/* gpu */
	PERF_RAPL_PSYS,		/* psys */

	PERF_RAPL_MAX,
};

static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
	"pp0-core",
	"package",
	"dram",
	"pp1-gpu",
	"psys",
};
/* Clients have PP0, PKG, PP1 */
#define RAPL_IDX_CLN	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* Servers have PP0, PKG, RAM */
#define RAPL_IDX_SRV	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)

/* Haswell clients have PP0, PKG, RAM, PP1 */
#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
#define RAPL_IDX_SKL_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
			  1<<RAPL_IDX_PKG_NRG_STAT|\
			  1<<RAPL_IDX_RAM_NRG_STAT|\
			  1<<RAPL_IDX_PP1_NRG_STAT|\
			  1<<RAPL_IDX_PSYS_NRG_STAT)

/* Knights Landing has PKG, RAM */
#define RAPL_IDX_KNL	(1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)
/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK	0xFFULL

#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __rapl_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __rapl_##_var##_show, NULL)

#define RAPL_CNTR_WIDTH 32

#define RAPL_EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
	.id		= 0,							\
	.event_str	= str,							\
}
struct rapl_pmu {
	raw_spinlock_t		lock;
	int			n_active;
	int			cpu;
	struct list_head	active_list;
	struct pmu		*pmu;
	ktime_t			timer_interval;
	struct hrtimer		hrtimer;
};

struct rapl_pmus {
	struct pmu		pmu;
	unsigned int		maxdie;
	struct rapl_pmu		*pmus[];
};

struct rapl_model {
	unsigned long		events;
	bool			apply_quirk;
};

/* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
	unsigned int dieid = topology_logical_die_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for non
	 * existent mappings in the topology map.
	 */
	return dieid < rapl_pmus->maxdie ? rapl_pmus->pmus[dieid] : NULL;
}
static inline u64 rapl_read_counter(struct perf_event *event)
{
	u64 raw;

	rdmsrl(event->hw.event_base, raw);
	return raw;
}
static inline u64 rapl_scale(u64 v, int cfg)
{
	if (cfg > NR_RAPL_DOMAINS) {
		pr_warn("Invalid domain %d, failed to scale data\n", cfg);
		return v;
	}
	/*
	 * scale delta to smallest unit (1/2^32)
	 * users must then scale back: count * 1/2^32 to get Joules,
	 * or use ldexp(count, -32).
	 * Watts = Joules/Time delta
	 */
	return v << (32 - rapl_hw_unit[cfg - 1]);
}
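
/*
 * Fold the current MSR value into event->count: read the status MSR, take
 * the 32-bit wide delta against the previously saved value (re-reading if a
 * concurrent update changed prev_count under us), scale it to 2^-32 Joule
 * units and accumulate it into the event.
 */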
static u64 rapl_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	s64 delta, sdelta;
	int shift = RAPL_CNTR_WIDTH;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(event->hw.event_base, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count) {
		cpu_relax();
		goto again;
	}

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	sdelta = rapl_scale(delta, event->hw.config);

	local64_add(sdelta, &event->count);

	return new_raw_count;
}
static void rapl_start_hrtimer(struct rapl_pmu *pmu)
{
	hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
		      HRTIMER_MODE_REL_PINNED);
}
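
/*
 * The RAPL status MSRs are free running and only RAPL_CNTR_WIDTH (32) bits
 * wide; this timer periodically folds the hardware counters into the
 * software counts so an overflow between two reads is never lost.
 */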
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
	struct perf_event *event;
	unsigned long flags;

	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		rapl_event_update(event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);

	return HRTIMER_RESTART;
}
static void rapl_hrtimer_init(struct rapl_pmu *pmu)
{
	struct hrtimer *hr = &pmu->hrtimer;

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hr->function = rapl_hrtimer_handle;
}
static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
				   struct perf_event *event)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	list_add_tail(&event->active_entry, &pmu->active_list);

	local64_set(&event->hw.prev_count, rapl_read_counter(event));

	pmu->n_active++;
	if (pmu->n_active == 1)
		rapl_start_hrtimer(pmu);
}
static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	__rapl_pmu_event_start(pmu, event);
	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	/* mark event as deactivated and stopped */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(pmu->n_active <= 0);
		pmu->n_active--;
		if (pmu->n_active == 0)
			hrtimer_cancel(&pmu->hrtimer);

		list_del(&event->active_entry);

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	/* check if update of sw counter is necessary */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}

	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__rapl_pmu_event_start(pmu, event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
	rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}
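
/*
 * Validate a new event: only the "power" PMU type is accepted, the config
 * value must be one of the pseudo-encodings above, the corresponding
 * counter must have been detected on this system, and sampling as well as
 * per-task counting are rejected.
 */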
static int rapl_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
	int bit, msr, ret = 0;
	struct rapl_pmu *pmu;

	/* only look at RAPL events */
	if (event->attr.type != rapl_pmus->pmu.type)
		return -ENOENT;

	/* check only supported bits are set */
	if (event->attr.config & ~RAPL_EVENT_MASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	/*
	 * check event is known (determines counter)
	 */
	switch (cfg) {
	case INTEL_RAPL_PP0:
		bit = RAPL_IDX_PP0_NRG_STAT;
		msr = MSR_PP0_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PKG:
		bit = RAPL_IDX_PKG_NRG_STAT;
		msr = MSR_PKG_ENERGY_STATUS;
		break;
	case INTEL_RAPL_RAM:
		bit = RAPL_IDX_RAM_NRG_STAT;
		msr = MSR_DRAM_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PP1:
		bit = RAPL_IDX_PP1_NRG_STAT;
		msr = MSR_PP1_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PSYS:
		bit = RAPL_IDX_PSYS_NRG_STAT;
		msr = MSR_PLATFORM_ENERGY_STATUS;
		break;
	default:
		return -EINVAL;
	}

	/* check event supported */
	if (!(rapl_cntr_mask & (1 << bit)))
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/* must be done before validate_group */
	pmu = cpu_to_rapl_pmu(event->cpu);
	if (!pmu)
		return -EINVAL;
	event->cpu = pmu->cpu;
	event->pmu_private = pmu;
	event->hw.event_base = msr;
	event->hw.config = cfg;
	event->hw.idx = bit;

	return ret;
}
static void rapl_pmu_event_read(struct perf_event *event)
{
	rapl_event_update(event);
}
static ssize_t rapl_get_attr_cpumask(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
static struct attribute *rapl_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group rapl_pmu_attr_group = {
	.attrs = rapl_pmu_attrs,
};
RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys,   rapl_psys, "event=0x05");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit,   rapl_psys_unit, "Joules");

/*
 * we compute in 0.23 nJ increments regardless of MSR
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale,   rapl_psys_scale, "2.3283064365386962890625e-10");
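
/*
 * Legacy per-model event lists: rapl_pmu_init() points
 * rapl_pmu_events_group.attrs at one of the arrays below according to the
 * matched CPU model.
 */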
static struct attribute *rapl_events_srv_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};
static struct attribute *rapl_events_cln_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	NULL,
};
static struct attribute *rapl_events_hsw_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};
static struct attribute *rapl_events_skl_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),
	EVENT_PTR(rapl_psys),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),
	EVENT_PTR(rapl_psys_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	EVENT_PTR(rapl_psys_scale),
	NULL,
};
static struct attribute *rapl_events_knl_attr[] = {
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};
/*
 * There are no default events, but we need to create an
 * "events" group (with empty attrs) before updating
 * it with detected events.
 */
static struct attribute *attrs_empty[] = {
	NULL,
};

static struct attribute_group rapl_pmu_events_group = {
	.name = "events",
	.attrs = attrs_empty,
};
DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group rapl_pmu_format_group = {
	.name = "format",
	.attrs = rapl_formats_attr,
};

static const struct attribute_group *rapl_attr_groups[] = {
	&rapl_pmu_attr_group,
	&rapl_pmu_format_group,
	&rapl_pmu_events_group,
	NULL,
};
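
/*
 * Per-domain "events" groups, attached via pmu.attr_update: each group is
 * exposed only if perf_msr_probe() finds the corresponding status MSR on
 * the running system.
 */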
static struct attribute *rapl_events_cores[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_cores_scale),
	NULL,
};

static struct attribute_group rapl_events_cores_group = {
	.name  = "events",
	.attrs = rapl_events_cores,
};
static struct attribute *rapl_events_pkg[] = {
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_pkg_scale),
	NULL,
};

static struct attribute_group rapl_events_pkg_group = {
	.name  = "events",
	.attrs = rapl_events_pkg,
};
static struct attribute *rapl_events_ram[] = {
	EVENT_PTR(rapl_ram),
	EVENT_PTR(rapl_ram_unit),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute_group rapl_events_ram_group = {
	.name  = "events",
	.attrs = rapl_events_ram,
};
static struct attribute *rapl_events_gpu[] = {
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_gpu_scale),
	NULL,
};

static struct attribute_group rapl_events_gpu_group = {
	.name  = "events",
	.attrs = rapl_events_gpu,
};
static struct attribute *rapl_events_psys[] = {
	EVENT_PTR(rapl_psys),
	EVENT_PTR(rapl_psys_unit),
	EVENT_PTR(rapl_psys_scale),
	NULL,
};

static struct attribute_group rapl_events_psys_group = {
	.name  = "events",
	.attrs = rapl_events_psys,
};
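
/*
 * Probe table for perf_msr_probe(): for each RAPL domain, the status MSR,
 * the sysfs group to expose when the domain is present, and the
 * availability test (a bit in the matched model's event mask).
 */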
static bool test_msr(int idx, void *data)
{
	return test_bit(idx, (unsigned long *) data);
}

static struct perf_msr rapl_msrs[] = {
	[PERF_RAPL_PP0]  = { MSR_PP0_ENERGY_STATUS,      &rapl_events_cores_group, test_msr },
	[PERF_RAPL_PKG]  = { MSR_PKG_ENERGY_STATUS,      &rapl_events_pkg_group,   test_msr },
	[PERF_RAPL_RAM]  = { MSR_DRAM_ENERGY_STATUS,     &rapl_events_ram_group,   test_msr },
	[PERF_RAPL_PP1]  = { MSR_PP1_ENERGY_STATUS,      &rapl_events_gpu_group,   test_msr },
	[PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group,  test_msr },
};
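
/*
 * One CPU per die is designated to read the RAPL counters for that die.
 * The hotplug callbacks below keep rapl_cpu_mask up to date and migrate
 * the perf context to a sibling when the reader CPU goes offline.
 */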
static int rapl_cpu_offline(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	/* Check if exiting cpu is used for collecting rapl events */
	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
		return 0;

	pmu->cpu = -1;
	/* Find a new cpu to collect rapl events */
	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

	/* Migrate rapl events to the new target */
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &rapl_cpu_mask);
		pmu->cpu = target;
		perf_pmu_migrate_context(pmu->pmu, cpu, target);
	}
	return 0;
}
static int rapl_cpu_online(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	if (!pmu) {
		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
		if (!pmu)
			return -ENOMEM;

		raw_spin_lock_init(&pmu->lock);
		INIT_LIST_HEAD(&pmu->active_list);
		pmu->pmu = &rapl_pmus->pmu;
		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
		rapl_hrtimer_init(pmu);

		rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
	}

	/*
	 * Check if there is an online cpu in the package which collects rapl
	 * events already.
	 */
	target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &rapl_cpu_mask);
	pmu->cpu = cpu;
	return 0;
}
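
/*
 * Read the per-domain energy unit exponents from MSR_RAPL_POWER_UNIT and
 * derive the hrtimer period so that a 32-bit counter cannot wrap between
 * two updates.
 */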
static int rapl_check_hw_unit(bool apply_quirk)
{
	u64 msr_rapl_power_unit_bits;
	int i;

	/* protect rdmsrl() to handle virtualization */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
		return -1;
	for (i = 0; i < NR_RAPL_DOMAINS; i++)
		rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

	/*
	 * DRAM domain on HSW server and KNL has a fixed energy unit which can
	 * be different from the unit in the power unit MSR. See
	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
	 * of 2. Datasheet, September 2014, Reference Number: 330784-001"
	 */
	if (apply_quirk)
		rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;

	/*
	 * Calculate the timer rate:
	 * Use reference of 200W for scaling the timeout to avoid counter
	 * overflows. 200W = 200 Joules/sec
	 * Divide interval by 2 to avoid lockstep (2 * 100)
	 * if hw unit is 32, then we use 2 ms 1/200/2
	 */
	rapl_timer_ms = 2;
	if (rapl_hw_unit[0] < 32) {
		rapl_timer_ms = (1000 / (2 * 100));
		rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
	}

	return 0;
}
static void __init rapl_advertise(void)
{
	int i;

	pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
		hweight32(rapl_cntr_mask), rapl_timer_ms);

	for (i = 0; i < NR_RAPL_DOMAINS; i++) {
		if (rapl_cntr_mask & (1 << i)) {
			pr_info("hw unit of domain %s 2^-%d Joules\n",
				rapl_domain_names[i], rapl_hw_unit[i]);
		}
	}
}
static void cleanup_rapl_pmus(void)
{
	unsigned int i;

	for (i = 0; i < rapl_pmus->maxdie; i++)
		kfree(rapl_pmus->pmus[i]);
	kfree(rapl_pmus);
}
static const struct attribute_group *rapl_attr_update[] = {
	&rapl_events_cores_group,
	&rapl_events_pkg_group,
	&rapl_events_ram_group,
	&rapl_events_gpu_group,
	&rapl_events_psys_group,
	NULL,
};
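
/*
 * Allocate the per-die rapl_pmu pointer array and fill in the struct pmu
 * callbacks; the PMU itself is registered later from rapl_pmu_init().
 */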
static int __init init_rapl_pmus(void)
{
	int maxdie = topology_max_packages() * topology_max_die_per_package();
	size_t size;

	size = sizeof(*rapl_pmus) + maxdie * sizeof(struct rapl_pmu *);
	rapl_pmus = kzalloc(size, GFP_KERNEL);
	if (!rapl_pmus)
		return -ENOMEM;

	rapl_pmus->maxdie		= maxdie;
	rapl_pmus->pmu.attr_groups	= rapl_attr_groups;
	rapl_pmus->pmu.attr_update	= rapl_attr_update;
	rapl_pmus->pmu.task_ctx_nr	= perf_invalid_context;
	rapl_pmus->pmu.event_init	= rapl_pmu_event_init;
	rapl_pmus->pmu.add		= rapl_pmu_event_add;
	rapl_pmus->pmu.del		= rapl_pmu_event_del;
	rapl_pmus->pmu.start		= rapl_pmu_event_start;
	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
	rapl_pmus->pmu.read		= rapl_pmu_event_read;
	rapl_pmus->pmu.module		= THIS_MODULE;
	rapl_pmus->pmu.capabilities	= PERF_PMU_CAP_NO_EXCLUDE;
	return 0;
}
#define X86_RAPL_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
struct intel_rapl_init_fun {
	bool apply_quirk;
	int cntr_mask;
	struct attribute **attrs;
};
static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_CLN,
	.attrs = rapl_events_cln_attr,
};

static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_HSW,
	.attrs = rapl_events_hsw_attr,
};

static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_KNL,
	.attrs = rapl_events_knl_attr,
};

static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SKL_CLN,
	.attrs = rapl_events_skl_attr,
};
static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,   snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,   snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,    hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,  hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,       hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE,  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, skl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT,   hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE,  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_DESKTOP, skl_rapl_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);
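
/*
 * Per-model RAPL domain bitmaps. perf_msr_probe() hands the matched
 * model's event mask to test_msr() above to decide which domains (and
 * therefore which sysfs groups) are available.
 */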
static struct rapl_model model_snb = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_PP1),
	.apply_quirk	= false,
};

static struct rapl_model model_snbep = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM),
	.apply_quirk	= false,
};

static struct rapl_model model_hsw = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM) |
			  BIT(PERF_RAPL_PP1),
	.apply_quirk	= false,
};

static struct rapl_model model_hsx = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM),
	.apply_quirk	= true,
};

static struct rapl_model model_knl = {
	.events		= BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM),
	.apply_quirk	= true,
};

static struct rapl_model model_skl = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM) |
			  BIT(PERF_RAPL_PP1) |
			  BIT(PERF_RAPL_PSYS),
	.apply_quirk	= false,
};
static const struct x86_cpu_id rapl_model_match[] __initconst = {
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,		model_snb),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,		model_snbep),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,		model_snb),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,		model_snbep),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,		model_hsw),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,		model_hsx),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,		model_hsw),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,		model_hsw),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,		model_hsw),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,		model_hsw),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,		model_hsx),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D,	model_hsx),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,		model_knl),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,		model_knl),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,		model_skl),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,	model_skl),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,		model_hsx),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE,	model_skl),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP,	model_skl),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE,	model_skl),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT,		model_hsw),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X,	model_hsw),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS,	model_hsw),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE,		model_skl),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_DESKTOP,	model_skl),
	{},
};
static int __init rapl_pmu_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_rapl_init_fun *rapl_init;
	struct rapl_model *rm;
	bool apply_quirk;
	int ret;

	id = x86_match_cpu(rapl_model_match);
	if (!id)
		return -ENODEV;

	rm = (struct rapl_model *) id->driver_data;
	perf_msr_probe(rapl_msrs, PERF_RAPL_MAX, false, (void *) &rm->events);

	id = x86_match_cpu(rapl_cpu_match);
	if (!id)
		return -ENODEV;

	rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
	apply_quirk = rapl_init->apply_quirk;
	rapl_cntr_mask = rapl_init->cntr_mask;
	rapl_pmu_events_group.attrs = rapl_init->attrs;

	ret = rapl_check_hw_unit(apply_quirk);
	if (ret)
		return ret;

	ret = init_rapl_pmus();
	if (ret)
		return ret;

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
				"perf/x86/rapl:online",
				rapl_cpu_online, rapl_cpu_offline);
	if (ret)
		goto out;

	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
	if (ret)
		goto out1;

	rapl_advertise();
	return 0;

out1:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
	pr_warn("Initialization failed (%d), disabled\n", ret);
	cleanup_rapl_pmus();
	return ret;
}
module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
	perf_pmu_unregister(&rapl_pmus->pmu);
	cleanup_rapl_pmus();
}
module_exit(intel_rapl_exit);