1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/perf_event.h>
3 #include <linux/types.h>
5 #include <asm/perf_event.h>
9 #include "../perf_event.h"
14 } lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
15 [LBR_FORMAT_EIP_FLAGS] = LBR_EIP_FLAGS,
16 [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
20 * Intel LBR_SELECT bits
21 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
23 * Hardware branch filter (not available on all CPUs)
25 #define LBR_KERNEL_BIT 0 /* do not capture at ring0 */
26 #define LBR_USER_BIT 1 /* do not capture at ring > 0 */
27 #define LBR_JCC_BIT 2 /* do not capture conditional branches */
28 #define LBR_REL_CALL_BIT 3 /* do not capture relative calls */
29 #define LBR_IND_CALL_BIT 4 /* do not capture indirect calls */
30 #define LBR_RETURN_BIT 5 /* do not capture near returns */
31 #define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */
32 #define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */
33 #define LBR_FAR_BIT 8 /* do not capture far branches */
34 #define LBR_CALL_STACK_BIT 9 /* enable call stack */
37 * The following bit only exists in Linux; we mask it out before writing it to
38 * the actual MSR. But it helps the perf constraint code understand
39 * that this is a separate configuration.
41 #define LBR_NO_INFO_BIT 63 /* don't read LBR_INFO. */
43 #define LBR_KERNEL (1 << LBR_KERNEL_BIT)
44 #define LBR_USER (1 << LBR_USER_BIT)
45 #define LBR_JCC (1 << LBR_JCC_BIT)
46 #define LBR_REL_CALL (1 << LBR_REL_CALL_BIT)
47 #define LBR_IND_CALL (1 << LBR_IND_CALL_BIT)
48 #define LBR_RETURN (1 << LBR_RETURN_BIT)
49 #define LBR_REL_JMP (1 << LBR_REL_JMP_BIT)
50 #define LBR_IND_JMP (1 << LBR_IND_JMP_BIT)
51 #define LBR_FAR (1 << LBR_FAR_BIT)
52 #define LBR_CALL_STACK (1 << LBR_CALL_STACK_BIT)
53 #define LBR_NO_INFO (1ULL << LBR_NO_INFO_BIT)
55 #define LBR_PLM (LBR_KERNEL | LBR_USER)
57 #define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */
58 #define LBR_NOT_SUPP -1 /* LBR filter not supported */
59 #define LBR_IGN 0 /* ignored */
70 #define LBR_FROM_FLAG_MISPRED BIT_ULL(63)
71 #define LBR_FROM_FLAG_IN_TX BIT_ULL(62)
72 #define LBR_FROM_FLAG_ABORT BIT_ULL(61)
74 #define LBR_FROM_SIGNEXT_2MSB (BIT_ULL(60) | BIT_ULL(59))
77 * x86 control flow change classification
78 * x86 control flow changes include branches, interrupts, traps, faults
81 X86_BR_NONE = 0, /* unknown */
83 X86_BR_USER = 1 << 0, /* branch target is user */
84 X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
86 X86_BR_CALL = 1 << 2, /* call */
87 X86_BR_RET = 1 << 3, /* return */
88 X86_BR_SYSCALL = 1 << 4, /* syscall */
89 X86_BR_SYSRET = 1 << 5, /* syscall return */
90 X86_BR_INT = 1 << 6, /* sw interrupt */
91 X86_BR_IRET = 1 << 7, /* return from interrupt */
92 X86_BR_JCC = 1 << 8, /* conditional */
93 X86_BR_JMP = 1 << 9, /* jump */
94 X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
95 X86_BR_IND_CALL = 1 << 11,/* indirect calls */
96 X86_BR_ABORT = 1 << 12,/* transaction abort */
97 X86_BR_IN_TX = 1 << 13,/* in transaction */
98 X86_BR_NO_TX = 1 << 14,/* not in transaction */
99 X86_BR_ZERO_CALL = 1 << 15,/* zero length call */
100 X86_BR_CALL_STACK = 1 << 16,/* call stack */
101 X86_BR_IND_JMP = 1 << 17,/* indirect jump */
103 X86_BR_TYPE_SAVE = 1 << 18,/* indicate to save branch type */
107 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
108 #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
125 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
127 #define X86_BR_ANY_CALL \
135 static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
138 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
139 * otherwise it becomes near impossible to get a reliable stack.
142 static void __intel_pmu_lbr_enable(bool pmi)
144 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
145 u64 debugctl, lbr_select = 0, orig_debugctl;
148 * No need to unfreeze manually, as v4 can do that as part
149 * of the GLOBAL_STATUS ack.
151 if (pmi && x86_pmu.version >= 4)
155 * No need to reprogram LBR_SELECT in a PMI, as it
159 lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
160 if (!pmi && cpuc->lbr_sel)
161 wrmsrl(MSR_LBR_SELECT, lbr_select);
163 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
164 orig_debugctl = debugctl;
165 debugctl |= DEBUGCTLMSR_LBR;
167 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
168 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
169 * may cause superfluous increase/decrease of LBR_TOS.
171 if (!(lbr_select & LBR_CALL_STACK))
172 debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
173 if (orig_debugctl != debugctl)
174 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
177 static void __intel_pmu_lbr_disable(void)
181 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
182 debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
183 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
186 void intel_pmu_lbr_reset_32(void)
190 for (i = 0; i < x86_pmu.lbr_nr; i++)
191 wrmsrl(x86_pmu.lbr_from + i, 0);
194 void intel_pmu_lbr_reset_64(void)
198 for (i = 0; i < x86_pmu.lbr_nr; i++) {
199 wrmsrl(x86_pmu.lbr_from + i, 0);
200 wrmsrl(x86_pmu.lbr_to + i, 0);
201 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
202 wrmsrl(MSR_LBR_INFO_0 + i, 0);
206 void intel_pmu_lbr_reset(void)
208 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
215 cpuc->last_task_ctx = NULL;
216 cpuc->last_log_id = 0;
220 * TOS = most recently recorded branch
222 static inline u64 intel_pmu_lbr_tos(void)
226 rdmsrl(x86_pmu.lbr_tos, tos);
236 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
237 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
238 * TSX is not supported they have no consistent behavior:
240 * - For wrmsr(), bits 61:62 are considered part of the sign extension.
241 * - For HW updates (branch captures) bits 61:62 are always OFF and are not
242 * part of the sign extension.
246 * 1) LBR has TSX format
247 * 2) CPU has no TSX support enabled
249 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
250 * value from rdmsr() must be converted to have a 61-bit sign extension,
251 * ignoring the TSX flags.
253 static inline bool lbr_from_signext_quirk_needed(void)
255 int lbr_format = x86_pmu.intel_cap.lbr_format;
256 bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
257 boot_cpu_has(X86_FEATURE_RTM);
259 return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
262 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
264 /* If quirk is enabled, ensure sign extension is 63 bits: */
265 inline u64 lbr_from_signext_quirk_wr(u64 val)
267 if (static_branch_unlikely(&lbr_from_quirk_key)) {
269 * Sign extend into bits 61:62 while preserving bit 63.
271 * Quirk is enabled when TSX is disabled. Therefore TSX bits
272 * in val are always OFF and must be changed to be sign
273 * extension bits. Since bits 59:60 are guaranteed to be
274 * part of the sign extension bits, we can just copy them
277 val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
283 * If quirk is needed, ensure sign extension is 61 bits:
285 static u64 lbr_from_signext_quirk_rd(u64 val)
287 if (static_branch_unlikely(&lbr_from_quirk_key)) {
289 * Quirk is on when TSX is not enabled. Therefore TSX
290 * flags must be read as OFF.
292 val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
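/*
 * Illustrative example (editor's note; the values are hypothetical but follow
 * the rules described above): on a quirk-affected CPU a saved kernel FROM
 * value may read back as 0x1fffffff81234567 (bits 62:61 clear, bits 60:59
 * set). Writing that back unmodified would not be sign extended to 63 bits,
 * so lbr_from_signext_quirk_wr() copies bits 60:59 into bits 62:61, yielding
 * 0x7fffffff81234567, while lbr_from_signext_quirk_rd() clears bits 62:61
 * again on the read side.
 */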
297 static inline void wrlbr_from(unsigned int idx, u64 val)
299 val = lbr_from_signext_quirk_wr(val);
300 wrmsrl(x86_pmu.lbr_from + idx, val);
303 static inline void wrlbr_to(unsigned int idx, u64 val)
305 wrmsrl(x86_pmu.lbr_to + idx, val);
308 static inline u64 rdlbr_from(unsigned int idx)
312 rdmsrl(x86_pmu.lbr_from + idx, val);
314 return lbr_from_signext_quirk_rd(val);
317 static inline u64 rdlbr_to(unsigned int idx)
321 rdmsrl(x86_pmu.lbr_to + idx, val);
326 void intel_pmu_lbr_restore(void *ctx)
328 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
329 struct x86_perf_task_context *task_ctx = ctx;
331 unsigned lbr_idx, mask;
332 u64 tos = task_ctx->tos;
334 mask = x86_pmu.lbr_nr - 1;
335 for (i = 0; i < task_ctx->valid_lbrs; i++) {
336 lbr_idx = (tos - i) & mask;
337 wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
338 wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
340 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
341 wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
344 for (; i < x86_pmu.lbr_nr; i++) {
345 lbr_idx = (tos - i) & mask;
346 wrlbr_from(lbr_idx, 0);
347 wrlbr_to(lbr_idx, 0);
348 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
349 wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
352 wrmsrl(x86_pmu.lbr_tos, tos);
354 if (cpuc->lbr_select)
355 wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
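/*
 * Editor's note: if the FROM entry at the saved TOS reads back as zero, a
 * deep C-state (or anything else that cleared the LBR MSRs) wiped the stack
 * after it was saved, so the saved context must be written back.
 */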
358 static __always_inline bool
359 lbr_is_reset_in_cstate(struct x86_perf_task_context *task_ctx)
361 return !rdlbr_from(task_ctx->tos);
364 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
366 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
368 if (task_ctx->opt.lbr_callstack_users == 0 ||
369 task_ctx->opt.lbr_stack_state == LBR_NONE) {
370 intel_pmu_lbr_reset();
375 * Do not restore the LBR registers if:
376 * - no one else touched them, and
377 * - they were not cleared in a C-state
379 if ((task_ctx == cpuc->last_task_ctx) &&
380 (task_ctx->opt.log_id == cpuc->last_log_id) &&
381 !lbr_is_reset_in_cstate(task_ctx)) {
382 task_ctx->opt.lbr_stack_state = LBR_NONE;
386 x86_pmu.lbr_restore(task_ctx);
388 task_ctx->opt.lbr_stack_state = LBR_NONE;
391 void intel_pmu_lbr_save(void *ctx)
393 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
394 struct x86_perf_task_context *task_ctx = ctx;
395 unsigned lbr_idx, mask;
399 mask = x86_pmu.lbr_nr - 1;
400 tos = intel_pmu_lbr_tos();
401 for (i = 0; i < x86_pmu.lbr_nr; i++) {
402 lbr_idx = (tos - i) & mask;
403 from = rdlbr_from(lbr_idx);
406 task_ctx->lbr_from[i] = from;
407 task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
408 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
409 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
411 task_ctx->valid_lbrs = i;
414 if (cpuc->lbr_select)
415 rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
418 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
420 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
422 if (task_ctx->opt.lbr_callstack_users == 0) {
423 task_ctx->opt.lbr_stack_state = LBR_NONE;
427 x86_pmu.lbr_save(task_ctx);
429 task_ctx->opt.lbr_stack_state = LBR_VALID;
431 cpuc->last_task_ctx = task_ctx;
432 cpuc->last_log_id = ++task_ctx->opt.log_id;
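/*
 * Editor's note: last_task_ctx and last_log_id let __intel_pmu_lbr_restore()
 * recognize that nothing touched the LBRs since this save and skip the MSR
 * rewrite on the next sched-in of the same task.
 */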
435 void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
436 struct perf_event_context *next)
438 struct x86_perf_task_context *prev_ctx_data, *next_ctx_data;
440 swap(prev->task_ctx_data, next->task_ctx_data);
443 * Architecture specific synchronization makes sense in
444 * case both prev->task_ctx_data and next->task_ctx_data
445 * pointers are allocated.
448 prev_ctx_data = next->task_ctx_data;
449 next_ctx_data = prev->task_ctx_data;
451 if (!prev_ctx_data || !next_ctx_data)
454 swap(prev_ctx_data->opt.lbr_callstack_users,
455 next_ctx_data->opt.lbr_callstack_users);
458 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
460 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
461 struct x86_perf_task_context *task_ctx;
463 if (!cpuc->lbr_users)
467 * If LBR callstack feature is enabled and the stack was saved when
468 * the task was scheduled out, restore the stack. Otherwise flush
471 task_ctx = ctx ? ctx->task_ctx_data : NULL;
474 __intel_pmu_lbr_restore(task_ctx);
476 __intel_pmu_lbr_save(task_ctx);
481 * Since a context switch can flip the address space and LBR entries
482 * are not tagged with an identifier, we need to wipe the LBR, even for
483 * per-cpu events. You simply cannot resolve the branches from the old
487 intel_pmu_lbr_reset();
490 static inline bool branch_user_callstack(unsigned br_sel)
492 return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
495 void intel_pmu_lbr_add(struct perf_event *event)
497 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
498 struct x86_perf_task_context *task_ctx;
503 if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
504 cpuc->lbr_select = 1;
506 cpuc->br_sel = event->hw.branch_reg.reg;
508 if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
509 task_ctx = event->ctx->task_ctx_data;
510 task_ctx->opt.lbr_callstack_users++;
514 * Request pmu::sched_task() callback, which will fire inside the
515 * regular perf event scheduling, so that call will:
517 * - restore or wipe; when LBR-callstack,
520 * when this is from __perf_event_task_sched_in().
522 * However, if this is from perf_install_in_context(), no such callback
523 * will follow and we'll need to reset the LBR here if this is the
526 * The problem is, we cannot tell these cases apart... but we can
527 * exclude the biggest chunk of cases by looking at
528 * event->total_time_running. An event that has accrued runtime cannot
529 * be 'new'. Conversely, a new event can get installed through the
530 * context switch path for the first time.
532 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
533 cpuc->lbr_pebs_users++;
534 perf_sched_cb_inc(event->ctx->pmu);
535 if (!cpuc->lbr_users++ && !event->total_time_running)
536 intel_pmu_lbr_reset();
539 void intel_pmu_lbr_del(struct perf_event *event)
541 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
542 struct x86_perf_task_context *task_ctx;
547 if (branch_user_callstack(cpuc->br_sel) &&
548 event->ctx->task_ctx_data) {
549 task_ctx = event->ctx->task_ctx_data;
550 task_ctx->opt.lbr_callstack_users--;
553 if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
554 cpuc->lbr_select = 0;
556 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
557 cpuc->lbr_pebs_users--;
559 WARN_ON_ONCE(cpuc->lbr_users < 0);
560 WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
561 perf_sched_cb_dec(event->ctx->pmu);
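/*
 * Editor's note: true when the fixed "virtual LBR" event is active and
 * excludes the host, i.e. the LBR MSRs are currently owned by a guest and
 * host code must not enable, read or clear them.
 */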
564 static inline bool vlbr_exclude_host(void)
566 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
568 return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
569 (unsigned long *)&cpuc->intel_ctrl_guest_mask);
572 void intel_pmu_lbr_enable_all(bool pmi)
574 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
576 if (cpuc->lbr_users && !vlbr_exclude_host())
577 __intel_pmu_lbr_enable(pmi);
580 void intel_pmu_lbr_disable_all(void)
582 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
584 if (cpuc->lbr_users && !vlbr_exclude_host())
585 __intel_pmu_lbr_disable();
588 void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
590 unsigned long mask = x86_pmu.lbr_nr - 1;
591 u64 tos = intel_pmu_lbr_tos();
594 for (i = 0; i < x86_pmu.lbr_nr; i++) {
595 unsigned long lbr_idx = (tos - i) & mask;
604 rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
606 cpuc->lbr_entries[i].from = msr_lastbranch.from;
607 cpuc->lbr_entries[i].to = msr_lastbranch.to;
608 cpuc->lbr_entries[i].mispred = 0;
609 cpuc->lbr_entries[i].predicted = 0;
610 cpuc->lbr_entries[i].in_tx = 0;
611 cpuc->lbr_entries[i].abort = 0;
612 cpuc->lbr_entries[i].cycles = 0;
613 cpuc->lbr_entries[i].type = 0;
614 cpuc->lbr_entries[i].reserved = 0;
616 cpuc->lbr_stack.nr = i;
617 cpuc->lbr_stack.hw_idx = tos;
621 * Due to lack of segmentation in Linux the effective address (offset)
622 * is the same as the linear address, allowing us to merge the LIP and EIP
625 void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
627 bool need_info = false, call_stack = false;
628 unsigned long mask = x86_pmu.lbr_nr - 1;
629 int lbr_format = x86_pmu.intel_cap.lbr_format;
630 u64 tos = intel_pmu_lbr_tos();
633 int num = x86_pmu.lbr_nr;
636 need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
637 if (cpuc->lbr_sel->config & LBR_CALL_STACK)
641 for (i = 0; i < num; i++) {
642 unsigned long lbr_idx = (tos - i) & mask;
643 u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
646 int lbr_flags = lbr_desc[lbr_format];
648 from = rdlbr_from(lbr_idx);
649 to = rdlbr_to(lbr_idx);
652 * Read LBR call stack entries
653 * until an invalid (all-zero) entry is detected.
655 if (call_stack && !from)
658 if (lbr_format == LBR_FORMAT_INFO && need_info) {
661 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
662 mis = !!(info & LBR_INFO_MISPRED);
664 in_tx = !!(info & LBR_INFO_IN_TX);
665 abort = !!(info & LBR_INFO_ABORT);
666 cycles = (info & LBR_INFO_CYCLES);
669 if (lbr_format == LBR_FORMAT_TIME) {
670 mis = !!(from & LBR_FROM_FLAG_MISPRED);
673 cycles = ((to >> 48) & LBR_INFO_CYCLES);
675 to = (u64)((((s64)to) << 16) >> 16);
678 if (lbr_flags & LBR_EIP_FLAGS) {
679 mis = !!(from & LBR_FROM_FLAG_MISPRED);
683 if (lbr_flags & LBR_TSX) {
684 in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
685 abort = !!(from & LBR_FROM_FLAG_ABORT);
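/*
 * Editor's note: 'skip' (set per format in the elided lines above) is the
 * number of flag bits stolen from the top of the FROM address; the
 * arithmetic shift below strips them and re-sign-extends the address.
 */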
688 from = (u64)((((s64)from) << skip) >> skip);
691 * Some CPUs report duplicated abort records,
692 * with the second entry not having an abort bit set.
693 * Skip them here. This loop runs backwards,
694 * so we need to undo the previous record.
695 * If the abort just happened outside the window
696 * the extra entry cannot be removed.
698 if (abort && x86_pmu.lbr_double_abort && out > 0)
701 cpuc->lbr_entries[out].from = from;
702 cpuc->lbr_entries[out].to = to;
703 cpuc->lbr_entries[out].mispred = mis;
704 cpuc->lbr_entries[out].predicted = pred;
705 cpuc->lbr_entries[out].in_tx = in_tx;
706 cpuc->lbr_entries[out].abort = abort;
707 cpuc->lbr_entries[out].cycles = cycles;
708 cpuc->lbr_entries[out].type = 0;
709 cpuc->lbr_entries[out].reserved = 0;
712 cpuc->lbr_stack.nr = out;
713 cpuc->lbr_stack.hw_idx = tos;
716 void intel_pmu_lbr_read(void)
718 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
721 * Don't read when all LBR users are using adaptive PEBS.
723 * This could be smarter and actually check the event,
724 * but this simple approach seems to work for now.
726 if (!cpuc->lbr_users || vlbr_exclude_host() ||
727 cpuc->lbr_users == cpuc->lbr_pebs_users)
730 x86_pmu.lbr_read(cpuc);
732 intel_pmu_lbr_filter(cpuc);
737 * - in case there is no HW filter
738 * - in case the HW filter has errata or limitations
740 static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
742 u64 br_type = event->attr.branch_sample_type;
745 if (br_type & PERF_SAMPLE_BRANCH_USER)
748 if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
749 mask |= X86_BR_KERNEL;
751 /* we ignore BRANCH_HV here */
753 if (br_type & PERF_SAMPLE_BRANCH_ANY)
756 if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
757 mask |= X86_BR_ANY_CALL;
759 if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
760 mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
762 if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
763 mask |= X86_BR_IND_CALL;
765 if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
766 mask |= X86_BR_ABORT;
768 if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
769 mask |= X86_BR_IN_TX;
771 if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
772 mask |= X86_BR_NO_TX;
774 if (br_type & PERF_SAMPLE_BRANCH_COND)
777 if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
778 if (!x86_pmu_has_lbr_callstack())
780 if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
782 mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
786 if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
787 mask |= X86_BR_IND_JMP;
789 if (br_type & PERF_SAMPLE_BRANCH_CALL)
790 mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
792 if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
793 mask |= X86_BR_TYPE_SAVE;
796 * stash actual user request into reg, it may
797 * be used by fixup code for some CPU
799 event->hw.branch_reg.reg = mask;
804 * Set up the HW LBR filter.
805 * Used only when available; it may not be enough to disambiguate
806 * all branches and may need the help of the SW filter.
808 static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
810 struct hw_perf_event_extra *reg;
811 u64 br_type = event->attr.branch_sample_type;
815 for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
816 if (!(br_type & (1ULL << i)))
819 v = x86_pmu.lbr_sel_map[i];
820 if (v == LBR_NOT_SUPP)
827 reg = &event->hw.branch_reg;
828 reg->idx = EXTRA_REG_LBR;
831 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
832 * in suppress mode. So LBR_SELECT should be set to
833 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
834 * But the 10th bit LBR_CALL_STACK does not operate
837 reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
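/*
 * Worked example (editor's note; the request is hypothetical and assumes
 * the Sandy Bridge style map below): PERF_SAMPLE_BRANCH_USER |
 * PERF_SAMPLE_BRANCH_ANY_CALL gives mask = LBR_USER | LBR_REL_CALL |
 * LBR_IND_CALL | LBR_FAR = 0x11a; XORing with (LBR_SEL_MASK &
 * ~LBR_CALL_STACK) = 0x1ff yields reg->config = 0x0e5, i.e. the
 * "do not capture" bits for ring 0, conditional branches, returns and
 * near jumps are set, while user-mode calls remain captured.
 */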
839 if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
840 (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
841 (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
842 reg->config |= LBR_NO_INFO;
847 int intel_pmu_setup_lbr_filter(struct perf_event *event)
858 * setup SW LBR filter
860 ret = intel_pmu_setup_sw_lbr_filter(event);
865 * setup HW LBR filter, if any
867 if (x86_pmu.lbr_sel_map)
868 ret = intel_pmu_setup_hw_lbr_filter(event);
874 * Return the type of control flow change at address "from".
875 * The instruction is not necessarily a branch (e.g., in case of an interrupt).
877 * The branch type returned also includes the priv level of the
878 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
880 * If a branch type is unknown OR the instruction cannot be
881 * decoded (e.g., text page not present), then X86_BR_NONE is
884 static int branch_type(unsigned long from, unsigned long to, int abort)
888 int bytes_read, bytes_left;
889 int ret = X86_BR_NONE;
890 int ext, to_plm, from_plm;
891 u8 buf[MAX_INSN_SIZE];
894 to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
895 from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;
898 * may be zero if the LBR did not fill up after a reset by the time
899 * we get a PMU interrupt
901 if (from == 0 || to == 0)
905 return X86_BR_ABORT | to_plm;
907 if (from_plm == X86_BR_USER) {
909 * can happen if measuring at the user level only
910 * and we interrupt in a kernel thread, e.g., idle.
915 /* may fail if text not present */
916 bytes_left = copy_from_user_nmi(buf, (void __user *)from,
918 bytes_read = MAX_INSN_SIZE - bytes_left;
925 * The LBR logs any address in the IP, even if the IP just
926 * faulted. This means userspace can control the from address.
927 * Ensure we don't blindly read any address by validating it is
928 * a known text address.
930 if (kernel_text_address(from)) {
933 * Assume we can get the maximum possible size
934 * when grabbing kernel data. This is not
935 * _strictly_ true since we could possibly be
936 * executing up next to a memory hole, but
937 * it is very unlikely to be a problem.
939 bytes_read = MAX_INSN_SIZE;
946 * decoder needs to know the ABI especially
947 * on 64-bit systems running 32-bit apps
950 is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
952 insn_init(&insn, addr, bytes_read, is64);
953 insn_get_opcode(&insn);
954 if (!insn.opcode.got)
957 switch (insn.opcode.bytes[0]) {
959 switch (insn.opcode.bytes[1]) {
960 case 0x05: /* syscall */
961 case 0x34: /* sysenter */
962 ret = X86_BR_SYSCALL;
964 case 0x07: /* sysret */
965 case 0x35: /* sysexit */
968 case 0x80 ... 0x8f: /* conditional */
975 case 0x70 ... 0x7f: /* conditional */
978 case 0xc2: /* near ret */
979 case 0xc3: /* near ret */
980 case 0xca: /* far ret */
981 case 0xcb: /* far ret */
984 case 0xcf: /* iret */
987 case 0xcc ... 0xce: /* int */
990 case 0xe8: /* call near rel */
991 insn_get_immediate(&insn);
992 if (insn.immediate1.value == 0) {
993 /* zero length call */
994 ret = X86_BR_ZERO_CALL;
998 case 0x9a: /* call far absolute */
1001 case 0xe0 ... 0xe3: /* loop jmp */
1004 case 0xe9 ... 0xeb: /* jmp */
1007 case 0xff: /* call near absolute, call far absolute ind */
1008 insn_get_modrm(&insn);
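/* bits 5:3 of the ModRM byte (the /reg field) select the 0xff sub-opcode */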
1009 ext = (insn.modrm.bytes[0] >> 3) & 0x7;
1011 case 2: /* near ind call */
1012 case 3: /* far ind call */
1013 ret = X86_BR_IND_CALL;
1017 ret = X86_BR_IND_JMP;
1025 * interrupts, traps, faults (and thus ring transitions) may
1026 * occur on any instruction. Thus, to classify them correctly,
1027 * we need to first look at the from and to priv levels. If they
1028 * are different and to is in the kernel, then it indicates
1029 * a ring transition. If the from instruction is not a ring
1030 * transition instr (syscall, sysenter, int), then it means
1031 * it was an irq, trap or fault.
1033 * We have no way of detecting kernel-to-kernel faults.
1035 if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
1036 && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
1040 * The branch priv level is determined by the target, as
1041 * is done by HW when LBR_SELECT is implemented.
1043 if (ret != X86_BR_NONE)
1049 #define X86_BR_TYPE_MAP_MAX 16
1051 static int branch_map[X86_BR_TYPE_MAP_MAX] = {
1052 PERF_BR_CALL, /* X86_BR_CALL */
1053 PERF_BR_RET, /* X86_BR_RET */
1054 PERF_BR_SYSCALL, /* X86_BR_SYSCALL */
1055 PERF_BR_SYSRET, /* X86_BR_SYSRET */
1056 PERF_BR_UNKNOWN, /* X86_BR_INT */
1057 PERF_BR_UNKNOWN, /* X86_BR_IRET */
1058 PERF_BR_COND, /* X86_BR_JCC */
1059 PERF_BR_UNCOND, /* X86_BR_JMP */
1060 PERF_BR_UNKNOWN, /* X86_BR_IRQ */
1061 PERF_BR_IND_CALL, /* X86_BR_IND_CALL */
1062 PERF_BR_UNKNOWN, /* X86_BR_ABORT */
1063 PERF_BR_UNKNOWN, /* X86_BR_IN_TX */
1064 PERF_BR_UNKNOWN, /* X86_BR_NO_TX */
1065 PERF_BR_CALL, /* X86_BR_ZERO_CALL */
1066 PERF_BR_UNKNOWN, /* X86_BR_CALL_STACK */
1067 PERF_BR_IND, /* X86_BR_IND_JMP */
1071 common_branch_type(int type)
1075 type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */
1079 if (i < X86_BR_TYPE_MAP_MAX)
1080 return branch_map[i];
1083 return PERF_BR_UNKNOWN;
1087 * implement actual branch filter based on user demand.
1088 * Hardware may not exactly satisfy that request, thus
1089 * we need to inspect opcodes. Mismatched branches are
1090 * discarded. Therefore, the number of branches returned
1091 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
1094 intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
1097 int br_sel = cpuc->br_sel;
1099 bool compress = false;
1101 /* if sampling all branches, then nothing to filter */
1102 if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
1103 ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
1106 for (i = 0; i < cpuc->lbr_stack.nr; i++) {
1108 from = cpuc->lbr_entries[i].from;
1109 to = cpuc->lbr_entries[i].to;
1111 type = branch_type(from, to, cpuc->lbr_entries[i].abort);
1112 if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
1113 if (cpuc->lbr_entries[i].in_tx)
1114 type |= X86_BR_IN_TX;
1116 type |= X86_BR_NO_TX;
1119 /* if type does not correspond, then discard */
1120 if (type == X86_BR_NONE || (br_sel & type) != type) {
1121 cpuc->lbr_entries[i].from = 0;
1125 if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
1126 cpuc->lbr_entries[i].type = common_branch_type(type);
1132 /* remove all entries with from=0 */
1133 for (i = 0; i < cpuc->lbr_stack.nr; ) {
1134 if (!cpuc->lbr_entries[i].from) {
1136 while (++j < cpuc->lbr_stack.nr)
1137 cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
1138 cpuc->lbr_stack.nr--;
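/* re-check slot i: the entry shifted into it may itself be zeroed */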
1139 if (!cpuc->lbr_entries[i].from)
1146 void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
1148 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1151 cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
1153 /* Cannot get TOS for large PEBS */
1154 if (cpuc->n_pebs == cpuc->n_large_pebs)
1155 cpuc->lbr_stack.hw_idx = -1ULL;
1157 cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
1159 for (i = 0; i < x86_pmu.lbr_nr; i++) {
1160 u64 info = lbr->lbr[i].info;
1161 struct perf_branch_entry *e = &cpuc->lbr_entries[i];
1163 e->from = lbr->lbr[i].from;
1164 e->to = lbr->lbr[i].to;
1165 e->mispred = !!(info & LBR_INFO_MISPRED);
1166 e->predicted = !(info & LBR_INFO_MISPRED);
1167 e->in_tx = !!(info & LBR_INFO_IN_TX);
1168 e->abort = !!(info & LBR_INFO_ABORT);
1169 e->cycles = info & LBR_INFO_CYCLES;
1172 intel_pmu_lbr_filter(cpuc);
1176 * Map interface branch filters onto LBR filters
1178 static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1179 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1180 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1181 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1182 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1183 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_REL_JMP
1184 | LBR_IND_JMP | LBR_FAR,
1186 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
1188 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
1189 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
1191 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
1193 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
1194 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1195 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1198 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1199 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1200 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1201 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1202 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1203 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
1204 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1206 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
1207 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1208 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1209 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
1212 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1213 [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
1214 [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
1215 [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
1216 [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
1217 [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
1218 [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1220 [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
1221 [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
1222 [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
1223 | LBR_RETURN | LBR_CALL_STACK,
1224 [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1225 [PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_REL_CALL,
1229 void __init intel_pmu_lbr_init_core(void)
1232 x86_pmu.lbr_tos = MSR_LBR_TOS;
1233 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1234 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1237 * SW branch filter usage:
1238 * - compensate for lack of HW filter
1242 /* nehalem/westmere */
1243 void __init intel_pmu_lbr_init_nhm(void)
1245 x86_pmu.lbr_nr = 16;
1246 x86_pmu.lbr_tos = MSR_LBR_TOS;
1247 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1248 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1250 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1251 x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
1254 * SW branch filter usage:
1255 * - workaround LBR_SEL errata (see above)
1256 * - support syscall, sysret capture.
1257 * That requires LBR_FAR but that means far
1258 * jmps need to be filtered out
1263 void __init intel_pmu_lbr_init_snb(void)
1265 x86_pmu.lbr_nr = 16;
1266 x86_pmu.lbr_tos = MSR_LBR_TOS;
1267 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1268 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1270 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1271 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
1274 * SW branch filter usage:
1275 * - support syscall, sysret capture.
1276 * That requires LBR_FAR but that means far
1277 * jmps need to be filtered out
1282 void intel_pmu_lbr_init_hsw(void)
1284 x86_pmu.lbr_nr = 16;
1285 x86_pmu.lbr_tos = MSR_LBR_TOS;
1286 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1287 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1289 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1290 x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
1292 if (lbr_from_signext_quirk_needed())
1293 static_branch_enable(&lbr_from_quirk_key);
1297 __init void intel_pmu_lbr_init_skl(void)
1299 x86_pmu.lbr_nr = 32;
1300 x86_pmu.lbr_tos = MSR_LBR_TOS;
1301 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1302 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1304 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1305 x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
1308 * SW branch filter usage:
1309 * - support syscall, sysret capture.
1310 * That requires LBR_FAR but that means far
1311 * jmps need to be filtered out
1316 void __init intel_pmu_lbr_init_atom(void)
1319 * only models starting at stepping 10 seem
1320 * to have an operational LBR which can freeze
1323 if (boot_cpu_data.x86_model == 28
1324 && boot_cpu_data.x86_stepping < 10) {
1325 pr_cont("LBR disabled due to erratum");
1330 x86_pmu.lbr_tos = MSR_LBR_TOS;
1331 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1332 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1335 * SW branch filter usage:
1336 * - compensate for lack of HW filter
1341 void __init intel_pmu_lbr_init_slm(void)
1344 x86_pmu.lbr_tos = MSR_LBR_TOS;
1345 x86_pmu.lbr_from = MSR_LBR_CORE_FROM;
1346 x86_pmu.lbr_to = MSR_LBR_CORE_TO;
1348 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1349 x86_pmu.lbr_sel_map = nhm_lbr_sel_map;
1352 * SW branch filter usage:
1353 * - compensate for lack of HW filter
1355 pr_cont("8-deep LBR, ");
1358 /* Knights Landing */
1359 void intel_pmu_lbr_init_knl(void)
1362 x86_pmu.lbr_tos = MSR_LBR_TOS;
1363 x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1364 x86_pmu.lbr_to = MSR_LBR_NHM_TO;
1366 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1367 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
1369 /* Knights Landing does have the MISPREDICT bit */
1370 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
1371 x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
1375 * x86_perf_get_lbr - get the LBR records information
1377 * @lbr: the caller's memory to store the LBR records information
1379 * Returns: 0 indicates the LBR info has been successfully obtained
1381 int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
1383 int lbr_fmt = x86_pmu.intel_cap.lbr_format;
1385 lbr->nr = x86_pmu.lbr_nr;
1386 lbr->from = x86_pmu.lbr_from;
1387 lbr->to = x86_pmu.lbr_to;
1388 lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? MSR_LBR_INFO_0 : 0;
1392 EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
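/*
 * Editor's note, usage sketch (hedged; the caller and variable names are
 * hypothetical, though KVM's vPMU is a real consumer of this export):
 *
 *	struct x86_pmu_lbr lbr;
 *
 *	if (!x86_perf_get_lbr(&lbr))
 *		pr_debug("host LBR: %u entries, FROM 0x%x, TO 0x%x, INFO 0x%x\n",
 *			 lbr.nr, lbr.from, lbr.to, lbr.info);
 */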
1394 struct event_constraint vlbr_constraint =
1395 __EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
1396 FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);