// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"
/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT          0 /* do not capture at ring0 */
#define LBR_USER_BIT            1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT             2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT        3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT        4 /* do not capture indirect calls */
#define LBR_RETURN_BIT          5 /* do not capture near returns */
#define LBR_IND_JMP_BIT         6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT         7 /* do not capture relative jumps */
#define LBR_FAR_BIT             8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT      9 /* enable call stack */

/*
 * The following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the constraint perf code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT        63 /* don't read LBR_INFO. */
#define LBR_KERNEL      (1 << LBR_KERNEL_BIT)
#define LBR_USER        (1 << LBR_USER_BIT)
#define LBR_JCC         (1 << LBR_JCC_BIT)
#define LBR_REL_CALL    (1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL    (1 << LBR_IND_CALL_BIT)
#define LBR_RETURN      (1 << LBR_RETURN_BIT)
#define LBR_REL_JMP     (1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP     (1 << LBR_IND_JMP_BIT)
#define LBR_FAR         (1 << LBR_FAR_BIT)
#define LBR_CALL_STACK  (1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO     (1ULL << LBR_NO_INFO_BIT)
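
/*
 * Note (illustrative): LBR_NO_INFO is bit 63, far above LBR_SEL_MASK
 * (0x3ff), so the "config & x86_pmu.lbr_sel_mask" step in
 * __intel_pmu_lbr_enable() strips it before the value reaches
 * MSR_LBR_SELECT.
 */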
#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK    0x3ff   /* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP    -1      /* LBR filter not supported */
#define LBR_IGN         0       /* ignored */

#define LBR_ANY          \
        (LBR_JCC        |\
         LBR_REL_CALL   |\
         LBR_IND_CALL   |\
         LBR_RETURN     |\
         LBR_REL_JMP    |\
         LBR_IND_JMP    |\
         LBR_FAR)

#define LBR_FROM_FLAG_MISPRED   BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX     BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT     BIT_ULL(61)

#define LBR_FROM_SIGNEXT_2MSB   (BIT_ULL(60) | BIT_ULL(59))
/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
        X86_BR_NONE             = 0,      /* unknown */

        X86_BR_USER             = 1 << 0, /* branch target is user */
        X86_BR_KERNEL           = 1 << 1, /* branch target is kernel */

        X86_BR_CALL             = 1 << 2, /* call */
        X86_BR_RET              = 1 << 3, /* return */
        X86_BR_SYSCALL          = 1 << 4, /* syscall */
        X86_BR_SYSRET           = 1 << 5, /* syscall return */
        X86_BR_INT              = 1 << 6, /* sw interrupt */
        X86_BR_IRET             = 1 << 7, /* return from interrupt */
        X86_BR_JCC              = 1 << 8, /* conditional */
        X86_BR_JMP              = 1 << 9, /* jump */
        X86_BR_IRQ              = 1 << 10,/* hw interrupt or trap or fault */
        X86_BR_IND_CALL         = 1 << 11,/* indirect calls */
        X86_BR_ABORT            = 1 << 12,/* transaction abort */
        X86_BR_IN_TX            = 1 << 13,/* in transaction */
        X86_BR_NO_TX            = 1 << 14,/* not in transaction */
        X86_BR_ZERO_CALL        = 1 << 15,/* zero length call */
        X86_BR_CALL_STACK       = 1 << 16,/* call stack */
        X86_BR_IND_JMP          = 1 << 17,/* indirect jump */

        X86_BR_TYPE_SAVE        = 1 << 18,/* indicate to save branch type */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY       \
        (X86_BR_CALL    |\
         X86_BR_RET     |\
         X86_BR_SYSCALL |\
         X86_BR_SYSRET  |\
         X86_BR_INT     |\
         X86_BR_IRET    |\
         X86_BR_JCC     |\
         X86_BR_JMP     |\
         X86_BR_IRQ     |\
         X86_BR_ABORT   |\
         X86_BR_IND_CALL |\
         X86_BR_IND_JMP |\
         X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL          \
        (X86_BR_CALL            |\
         X86_BR_IND_CALL        |\
         X86_BR_ZERO_CALL       |\
         X86_BR_SYSCALL         |\
         X86_BR_IRQ             |\
         X86_BR_INT)
/*
 * Intel LBR_CTL bits
 *
 * Hardware branch filter for Arch LBR
 */
#define ARCH_LBR_KERNEL_BIT             1  /* capture at ring0 */
#define ARCH_LBR_USER_BIT               2  /* capture at ring > 0 */
#define ARCH_LBR_CALL_STACK_BIT         3  /* enable call stack */
#define ARCH_LBR_JCC_BIT                16 /* capture conditional branches */
#define ARCH_LBR_REL_JMP_BIT            17 /* capture relative jumps */
#define ARCH_LBR_IND_JMP_BIT            18 /* capture indirect jumps */
#define ARCH_LBR_REL_CALL_BIT           19 /* capture relative calls */
#define ARCH_LBR_IND_CALL_BIT           20 /* capture indirect calls */
#define ARCH_LBR_RETURN_BIT             21 /* capture near returns */
#define ARCH_LBR_OTHER_BRANCH_BIT       22 /* capture other branches */

#define ARCH_LBR_KERNEL                 (1ULL << ARCH_LBR_KERNEL_BIT)
#define ARCH_LBR_USER                   (1ULL << ARCH_LBR_USER_BIT)
#define ARCH_LBR_CALL_STACK             (1ULL << ARCH_LBR_CALL_STACK_BIT)
#define ARCH_LBR_JCC                    (1ULL << ARCH_LBR_JCC_BIT)
#define ARCH_LBR_REL_JMP                (1ULL << ARCH_LBR_REL_JMP_BIT)
#define ARCH_LBR_IND_JMP                (1ULL << ARCH_LBR_IND_JMP_BIT)
#define ARCH_LBR_REL_CALL               (1ULL << ARCH_LBR_REL_CALL_BIT)
#define ARCH_LBR_IND_CALL               (1ULL << ARCH_LBR_IND_CALL_BIT)
#define ARCH_LBR_RETURN                 (1ULL << ARCH_LBR_RETURN_BIT)
#define ARCH_LBR_OTHER_BRANCH           (1ULL << ARCH_LBR_OTHER_BRANCH_BIT)

#define ARCH_LBR_ANY                     \
        (ARCH_LBR_JCC                   |\
         ARCH_LBR_REL_JMP               |\
         ARCH_LBR_IND_JMP               |\
         ARCH_LBR_REL_CALL              |\
         ARCH_LBR_IND_CALL              |\
         ARCH_LBR_RETURN                |\
         ARCH_LBR_OTHER_BRANCH)

#define ARCH_LBR_CTL_MASK               0x7f000e
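
/*
 * Sanity check on the mask value (illustrative): 0x7f000e covers bits 1-3
 * (ARCH_LBR_KERNEL/USER/CALL_STACK = 0xe) plus bits 16-22 (the seven
 * branch-type filter bits = 0x7f0000).
 */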
static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

static __always_inline bool is_lbr_call_stack_bit_set(u64 config)
{
        if (static_cpu_has(X86_FEATURE_ARCH_LBR))
                return !!(config & ARCH_LBR_CALL_STACK);

        return !!(config & LBR_CALL_STACK);
}

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI,
 * otherwise it becomes nearly impossible to get a reliable stack.
 */
static void __intel_pmu_lbr_enable(bool pmi)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        u64 debugctl, lbr_select = 0, orig_debugctl;

        /*
         * No need to unfreeze manually, as v4 can do that as part
         * of the GLOBAL_STATUS ack.
         */
        if (pmi && x86_pmu.version >= 4)
                return;

        /*
         * No need to reprogram LBR_SELECT in a PMI, as it
         * did not change.
         */
        if (cpuc->lbr_sel)
                lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
        if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
                wrmsrl(MSR_LBR_SELECT, lbr_select);

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        orig_debugctl = debugctl;

        if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
                debugctl |= DEBUGCTLMSR_LBR;
        /*
         * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
         * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
         * may cause superfluous increase/decrease of LBR_TOS.
         */
        if (is_lbr_call_stack_bit_set(lbr_select))
                debugctl &= ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
        else
                debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;

        if (orig_debugctl != debugctl)
                wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);

        if (static_cpu_has(X86_FEATURE_ARCH_LBR))
                wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
}
void intel_pmu_lbr_reset_32(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++)
                wrmsrl(x86_pmu.lbr_from + i, 0);
}

void intel_pmu_lbr_reset_64(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                wrmsrl(x86_pmu.lbr_from + i, 0);
                wrmsrl(x86_pmu.lbr_to + i, 0);
                if (x86_pmu.lbr_has_info)
                        wrmsrl(x86_pmu.lbr_info + i, 0);
        }
}

static void intel_pmu_arch_lbr_reset(void)
{
        /* Writing to the ARCH_LBR_DEPTH MSR resets all LBR entries to 0 */
        wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
}

void intel_pmu_lbr_reset(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!x86_pmu.lbr_nr)
                return;

        x86_pmu.lbr_reset();

        cpuc->last_task_ctx = NULL;
        cpuc->last_log_id = 0;
        if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && cpuc->lbr_select)
                wrmsrl(MSR_LBR_SELECT, 0);
}
/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
        u64 tos;

        rdmsrl(x86_pmu.lbr_tos, tos);

        return tos;
}
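
/*
 * The LBR stack is a ring buffer indexed relative to TOS. With, say,
 * lbr_nr = 16 and tos = 2 (values illustrative), the "(tos - i) & mask"
 * walk used below visits entries 2, 1, 0, 15, 14, ... so that
 * lbr_entries[0] always holds the most recent branch.
 */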
enum {
        LBR_NONE,
        LBR_VALID,
};

/*
 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 * TSX is not supported they have no consistent behavior:
 *
 *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
 *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
 *     part of the sign extension.
 *
 * Therefore, if:
 *
 *   1) LBR has TSX format
 *   2) CPU has no TSX support enabled
 *
 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
 * value from rdmsr() must be converted to have a 61-bit sign extension,
 * ignoring the TSX flags.
 */
static inline bool lbr_from_signext_quirk_needed(void)
{
        bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
                           boot_cpu_has(X86_FEATURE_RTM);

        return !tsx_support && x86_pmu.lbr_has_tsx;
}

static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
{
        if (static_branch_unlikely(&lbr_from_quirk_key)) {
                /*
                 * Sign extend into bits 61:62 while preserving bit 63.
                 *
                 * The quirk is enabled when TSX is disabled. Therefore the
                 * TSX bits in val are always OFF and must be changed to be
                 * sign extension bits. Since bits 59:60 are guaranteed to
                 * be part of the sign extension bits, we can just copy them
                 * to 61:62.
                 */
                val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
        }
        return val;
}
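
/*
 * Worked example (illustrative): a HW-captured kernel "from" address has
 * bits 60:59 set as part of its sign extension, while the TSX flag bits
 * 62:61 read as 0. ORing (LBR_FROM_SIGNEXT_2MSB & val) << 2 copies those
 * two sign bits into bits 62:61, completing the sign extension that
 * wrmsr() expects; for user addresses bits 60:59 are 0 and the OR is a
 * no-op. Bit 63 (LBR_FROM_FLAG_MISPRED) is left untouched.
 */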
/*
 * If quirk is needed, ensure sign extension is 61 bits:
 */
static u64 lbr_from_signext_quirk_rd(u64 val)
{
        if (static_branch_unlikely(&lbr_from_quirk_key)) {
                /*
                 * Quirk is on when TSX is not enabled. Therefore TSX
                 * flags must be read as OFF.
                 */
                val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
        }
        return val;
}

static __always_inline void wrlbr_from(unsigned int idx, u64 val)
{
        val = lbr_from_signext_quirk_wr(val);
        wrmsrl(x86_pmu.lbr_from + idx, val);
}

static __always_inline void wrlbr_to(unsigned int idx, u64 val)
{
        wrmsrl(x86_pmu.lbr_to + idx, val);
}

static __always_inline void wrlbr_info(unsigned int idx, u64 val)
{
        wrmsrl(x86_pmu.lbr_info + idx, val);
}

static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
{
        u64 val;

        if (lbr)
                return lbr->from;

        rdmsrl(x86_pmu.lbr_from + idx, val);

        return lbr_from_signext_quirk_rd(val);
}

static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
{
        u64 val;

        if (lbr)
                return lbr->to;

        rdmsrl(x86_pmu.lbr_to + idx, val);

        return val;
}

static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
{
        u64 val;

        if (lbr)
                return lbr->info;

        rdmsrl(x86_pmu.lbr_info + idx, val);

        return val;
}

static __always_inline void
wrlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
{
        wrlbr_from(idx, lbr->from);
        wrlbr_to(idx, lbr->to);
        if (need_info)
                wrlbr_info(idx, lbr->info);
}

static __always_inline bool
rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
{
        u64 from = rdlbr_from(idx, NULL);

        /* Don't read an invalid entry */
        if (!from)
                return false;

        lbr->from = from;
        lbr->to = rdlbr_to(idx, NULL);
        if (need_info)
                lbr->info = rdlbr_info(idx, NULL);

        return true;
}
void intel_pmu_lbr_restore(void *ctx)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx = ctx;
        bool need_info = x86_pmu.lbr_has_info;
        u64 tos = task_ctx->tos;
        unsigned lbr_idx, mask;
        int i;

        mask = x86_pmu.lbr_nr - 1;
        for (i = 0; i < task_ctx->valid_lbrs; i++) {
                lbr_idx = (tos - i) & mask;
                wrlbr_all(&task_ctx->lbr[i], lbr_idx, need_info);
        }

        for (; i < x86_pmu.lbr_nr; i++) {
                lbr_idx = (tos - i) & mask;
                wrlbr_from(lbr_idx, 0);
                wrlbr_to(lbr_idx, 0);
                if (need_info)
                        wrlbr_info(lbr_idx, 0);
        }

        wrmsrl(x86_pmu.lbr_tos, tos);

        if (cpuc->lbr_select)
                wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
}

static void intel_pmu_arch_lbr_restore(void *ctx)
{
        struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
        struct lbr_entry *entries = task_ctx->entries;
        int i;

        /* Fast reset the LBRs before restore if the call stack is not full. */
        if (!entries[x86_pmu.lbr_nr - 1].from)
                intel_pmu_arch_lbr_reset();

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                if (!entries[i].from)
                        break;
                wrlbr_all(&entries[i], i, true);
        }
}

/*
 * Restore the Architecture LBR state from the xsave area in the perf
 * context data for the task via the XRSTORS instruction.
 */
static void intel_pmu_arch_lbr_xrstors(void *ctx)
{
        struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;

        xrstors(&task_ctx->xsave, XFEATURE_MASK_LBR);
}
static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
{
        if (static_cpu_has(X86_FEATURE_ARCH_LBR))
                return x86_pmu.lbr_deep_c_reset && !rdlbr_from(0, NULL);

        return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL);
}
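
/*
 * A scenario for the fast path below (illustrative): task A schedules out
 * (its LBR state is saved and cpuc->last_task_ctx/last_log_id are stamped)
 * and later schedules back in on the same CPU with nothing having touched
 * the LBRs in between. If the MSRs were also not wiped by a deep C-state
 * (detected by lbr_is_reset_in_cstate() reading back a zero where a
 * non-zero "from" was saved), the saved state is still live in hardware
 * and the restore can be skipped entirely.
 */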
static void __intel_pmu_lbr_restore(void *ctx)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
            task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
                intel_pmu_lbr_reset();
                return;
        }

        /*
         * Do not restore the LBR registers if:
         * - no one else touched them, and
         * - they were not cleared in a deep C-state
         */
        if ((ctx == cpuc->last_task_ctx) &&
            (task_context_opt(ctx)->log_id == cpuc->last_log_id) &&
            !lbr_is_reset_in_cstate(ctx)) {
                task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
                return;
        }

        x86_pmu.lbr_restore(ctx);

        task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
}

void intel_pmu_lbr_save(void *ctx)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx = ctx;
        bool need_info = x86_pmu.lbr_has_info;
        unsigned lbr_idx, mask;
        u64 tos;
        int i;

        mask = x86_pmu.lbr_nr - 1;
        tos = intel_pmu_lbr_tos();
        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                lbr_idx = (tos - i) & mask;
                if (!rdlbr_all(&task_ctx->lbr[i], lbr_idx, need_info))
                        break;
        }
        task_ctx->valid_lbrs = i;
        task_ctx->tos = tos;

        if (cpuc->lbr_select)
                rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
}

static void intel_pmu_arch_lbr_save(void *ctx)
{
        struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
        struct lbr_entry *entries = task_ctx->entries;
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                if (!rdlbr_all(&entries[i], i, true))
                        break;
        }

        /* LBR call stack is not full. Reset is required in restore. */
        if (i < x86_pmu.lbr_nr)
                entries[x86_pmu.lbr_nr - 1].from = 0;
}

/*
 * Save the Architecture LBR state to the xsave area in the perf
 * context data for the task via the XSAVES instruction.
 */
static void intel_pmu_arch_lbr_xsaves(void *ctx)
{
        struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;

        xsaves(&task_ctx->xsave, XFEATURE_MASK_LBR);
}

static void __intel_pmu_lbr_save(void *ctx)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (task_context_opt(ctx)->lbr_callstack_users == 0) {
                task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
                return;
        }

        x86_pmu.lbr_save(ctx);

        task_context_opt(ctx)->lbr_stack_state = LBR_VALID;

        cpuc->last_task_ctx = ctx;
        cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
}
void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
                                 struct perf_event_context *next)
{
        void *prev_ctx_data, *next_ctx_data;

        swap(prev->task_ctx_data, next->task_ctx_data);

        /*
         * Architecture specific synchronization makes sense in case
         * both prev->task_ctx_data and next->task_ctx_data pointers
         * are allocated.
         */

        prev_ctx_data = next->task_ctx_data;
        next_ctx_data = prev->task_ctx_data;

        if (!prev_ctx_data || !next_ctx_data)
                return;

        swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
             task_context_opt(next_ctx_data)->lbr_callstack_users);
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        void *task_ctx;

        if (!cpuc->lbr_users)
                return;

        /*
         * If the LBR callstack feature is enabled and the stack was saved
         * when the task was scheduled out, restore the stack. Otherwise
         * flush the LBR stack.
         */
        task_ctx = ctx ? ctx->task_ctx_data : NULL;
        if (task_ctx) {
                if (sched_in)
                        __intel_pmu_lbr_restore(task_ctx);
                else
                        __intel_pmu_lbr_save(task_ctx);
                return;
        }

        /*
         * Since a context switch can flip the address space and LBR entries
         * are not tagged with an identifier, we need to wipe the LBR, even for
         * per-cpu events. You simply cannot resolve the branches from the old
         * address space.
         */
        if (sched_in)
                intel_pmu_lbr_reset();
}

static inline bool branch_user_callstack(unsigned br_sel)
{
        return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}
void intel_pmu_lbr_add(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!x86_pmu.lbr_nr)
                return;

        if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
                cpuc->lbr_select = 1;

        cpuc->br_sel = event->hw.branch_reg.reg;

        if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
                task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;

        /*
         * Request the pmu::sched_task() callback, which will fire inside the
         * regular perf event scheduling, so that call will:
         *
         *  - restore or wipe; when LBR-callstack,
         *  - wipe; otherwise,
         *
         * when this is from __perf_event_task_sched_in().
         *
         * However, if this is from perf_install_in_context(), no such callback
         * will follow and we'll need to reset the LBR here if this is the
         * first LBR event.
         *
         * The problem is, we cannot tell these cases apart... but we can
         * exclude the biggest chunk of cases by looking at
         * event->total_time_running. An event that has accrued runtime cannot
         * be 'new'. Conversely, a new event can get installed through the
         * context switch path for the first time.
         */
        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
                cpuc->lbr_pebs_users++;
        perf_sched_cb_inc(event->ctx->pmu);
        if (!cpuc->lbr_users++ && !event->total_time_running)
                intel_pmu_lbr_reset();
}
void release_lbr_buffers(void)
{
        struct kmem_cache *kmem_cache;
        struct cpu_hw_events *cpuc;
        int cpu;

        if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
                return;

        for_each_possible_cpu(cpu) {
                cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
                kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
                if (kmem_cache && cpuc->lbr_xsave) {
                        kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
                        cpuc->lbr_xsave = NULL;
                }
        }
}

void reserve_lbr_buffers(void)
{
        struct kmem_cache *kmem_cache;
        struct cpu_hw_events *cpuc;
        int cpu;

        if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
                return;

        for_each_possible_cpu(cpu) {
                cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
                kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
                if (!kmem_cache || cpuc->lbr_xsave)
                        continue;

                cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
                                                        GFP_KERNEL | __GFP_ZERO,
                                                        cpu_to_node(cpu));
        }
}

void intel_pmu_lbr_del(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!x86_pmu.lbr_nr)
                return;

        if (branch_user_callstack(cpuc->br_sel) &&
            event->ctx->task_ctx_data)
                task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;

        if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
                cpuc->lbr_select = 0;

        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
                cpuc->lbr_pebs_users--;
        cpuc->lbr_users--;
        WARN_ON_ONCE(cpuc->lbr_users < 0);
        WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
        perf_sched_cb_dec(event->ctx->pmu);
}
static inline bool vlbr_exclude_host(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
                        (unsigned long *)&cpuc->intel_ctrl_guest_mask);
}

void intel_pmu_lbr_enable_all(bool pmi)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (cpuc->lbr_users && !vlbr_exclude_host())
                __intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (cpuc->lbr_users && !vlbr_exclude_host()) {
                if (static_cpu_has(X86_FEATURE_ARCH_LBR))
                        return __intel_pmu_arch_lbr_disable();

                __intel_pmu_lbr_disable();
        }
}
void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
        unsigned long mask = x86_pmu.lbr_nr - 1;
        u64 tos = intel_pmu_lbr_tos();
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
                union {
                        struct {
                                u32 from;
                                u32 to;
                        };
                        u64     lbr;
                } msr_lastbranch;

                rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

                cpuc->lbr_entries[i].from       = msr_lastbranch.from;
                cpuc->lbr_entries[i].to         = msr_lastbranch.to;
                cpuc->lbr_entries[i].mispred    = 0;
                cpuc->lbr_entries[i].predicted  = 0;
                cpuc->lbr_entries[i].in_tx      = 0;
                cpuc->lbr_entries[i].abort      = 0;
                cpuc->lbr_entries[i].cycles     = 0;
                cpuc->lbr_entries[i].type       = 0;
                cpuc->lbr_entries[i].reserved   = 0;
        }
        cpuc->lbr_stack.nr = i;
        cpuc->lbr_stack.hw_idx = tos;
}
/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
        bool need_info = false, call_stack = false;
        unsigned long mask = x86_pmu.lbr_nr - 1;
        u64 tos = intel_pmu_lbr_tos();
        int i;
        int out = 0;
        int num = x86_pmu.lbr_nr;

        if (cpuc->lbr_sel) {
                need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
                if (cpuc->lbr_sel->config & LBR_CALL_STACK)
                        call_stack = true;
        }

        for (i = 0; i < num; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
                u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
                u16 cycles = 0;

                from = rdlbr_from(lbr_idx, NULL);
                to   = rdlbr_to(lbr_idx, NULL);

                /*
                 * Read LBR call stack entries
                 * until an invalid entry (all 0s) is detected.
                 */
                if (call_stack && !from)
                        break;

                if (x86_pmu.lbr_has_info) {
                        if (need_info) {
                                u64 info;

                                info = rdlbr_info(lbr_idx, NULL);
                                mis = !!(info & LBR_INFO_MISPRED);
                                pred = !mis;
                                cycles = (info & LBR_INFO_CYCLES);
                                if (x86_pmu.lbr_has_tsx) {
                                        in_tx = !!(info & LBR_INFO_IN_TX);
                                        abort = !!(info & LBR_INFO_ABORT);
                                }
                        }
                } else {
                        int skip = 0;

                        if (x86_pmu.lbr_from_flags) {
                                mis = !!(from & LBR_FROM_FLAG_MISPRED);
                                pred = !mis;
                                skip = 1;
                        }
                        if (x86_pmu.lbr_has_tsx) {
                                in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
                                abort = !!(from & LBR_FROM_FLAG_ABORT);
                                skip = 3;
                        }
                        from = (u64)((((s64)from) << skip) >> skip);
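
                        /*
                         * Example of the flag stripping above (illustrative):
                         * with both from-flags and TSX present, skip = 3, so
                         * bits 63:61 (MISPRED/IN_TX/ABORT) are shifted out
                         * and the value is sign-extended back from bit 60.
                         */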
                        if (x86_pmu.lbr_to_cycles) {
                                cycles = ((to >> 48) & LBR_INFO_CYCLES);
                                to = (u64)((((s64)to) << 16) >> 16);
                        }
                }

                /*
                 * Some CPUs report duplicated abort records,
                 * with the second entry not having an abort bit set.
                 * Skip them here. This loop runs backwards,
                 * so we need to undo the previous record.
                 * If the abort just happened outside the window
                 * the extra entry cannot be removed.
                 */
                if (abort && x86_pmu.lbr_double_abort && out > 0)
                        out--;

                cpuc->lbr_entries[out].from      = from;
                cpuc->lbr_entries[out].to        = to;
                cpuc->lbr_entries[out].mispred   = mis;
                cpuc->lbr_entries[out].predicted = pred;
                cpuc->lbr_entries[out].in_tx     = in_tx;
                cpuc->lbr_entries[out].abort     = abort;
                cpuc->lbr_entries[out].cycles    = cycles;
                cpuc->lbr_entries[out].type      = 0;
                cpuc->lbr_entries[out].reserved  = 0;
                out++;
        }
        cpuc->lbr_stack.nr = out;
        cpuc->lbr_stack.hw_idx = tos;
}
static DEFINE_STATIC_KEY_FALSE(x86_lbr_mispred);
static DEFINE_STATIC_KEY_FALSE(x86_lbr_cycles);
static DEFINE_STATIC_KEY_FALSE(x86_lbr_type);

static __always_inline int get_lbr_br_type(u64 info)
{
        int type = 0;

        if (static_branch_likely(&x86_lbr_type))
                type = (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;

        return type;
}

static __always_inline bool get_lbr_mispred(u64 info)
{
        bool mispred = false;

        if (static_branch_likely(&x86_lbr_mispred))
                mispred = !!(info & LBR_INFO_MISPRED);

        return mispred;
}

static __always_inline u16 get_lbr_cycles(u64 info)
{
        u16 cycles = info & LBR_INFO_CYCLES;

        if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
            (!static_branch_likely(&x86_lbr_cycles) ||
             !(info & LBR_INFO_CYC_CNT_VALID)))
                cycles = 0;

        return cycles;
}
static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
                                struct lbr_entry *entries)
{
        struct perf_branch_entry *e;
        struct lbr_entry *lbr;
        u64 from, to, info;
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                lbr = entries ? &entries[i] : NULL;
                e = &cpuc->lbr_entries[i];

                from = rdlbr_from(i, lbr);
                /*
                 * Read LBR entries until an invalid entry (all 0s) is
                 * detected.
                 */
                if (!from)
                        break;

                to = rdlbr_to(i, lbr);
                info = rdlbr_info(i, lbr);

                e->from         = from;
                e->to           = to;
                e->mispred      = get_lbr_mispred(info);
                e->predicted    = !e->mispred;
                e->in_tx        = !!(info & LBR_INFO_IN_TX);
                e->abort        = !!(info & LBR_INFO_ABORT);
                e->cycles       = get_lbr_cycles(info);
                e->type         = get_lbr_br_type(info);
                e->reserved     = 0;
        }

        cpuc->lbr_stack.nr = i;
}

static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
{
        intel_pmu_store_lbr(cpuc, NULL);
}

static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
{
        struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave;

        if (!xsave) {
                intel_pmu_store_lbr(cpuc, NULL);
                return;
        }
        xsaves(&xsave->xsave, XFEATURE_MASK_LBR);

        intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
}
void intel_pmu_lbr_read(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        /*
         * Don't read when all LBR users are using adaptive PEBS.
         *
         * This could be smarter and actually check the event,
         * but this simple approach seems to work for now.
         */
        if (!cpuc->lbr_users || vlbr_exclude_host() ||
            cpuc->lbr_users == cpuc->lbr_pebs_users)
                return;

        x86_pmu.lbr_read(cpuc);

        intel_pmu_lbr_filter(cpuc);
}
/*
 * The SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
        u64 br_type = event->attr.branch_sample_type;
        int mask = 0;

        if (br_type & PERF_SAMPLE_BRANCH_USER)
                mask |= X86_BR_USER;

        if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
                mask |= X86_BR_KERNEL;

        /* we ignore BRANCH_HV here */

        if (br_type & PERF_SAMPLE_BRANCH_ANY)
                mask |= X86_BR_ANY;

        if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
                mask |= X86_BR_ANY_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
                mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

        if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
                mask |= X86_BR_IND_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
                mask |= X86_BR_ABORT;

        if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
                mask |= X86_BR_IN_TX;

        if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
                mask |= X86_BR_NO_TX;

        if (br_type & PERF_SAMPLE_BRANCH_COND)
                mask |= X86_BR_JCC;

        if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
                if (!x86_pmu_has_lbr_callstack())
                        return -EOPNOTSUPP;
                if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
                        return -EINVAL;
                mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
                        X86_BR_CALL_STACK;
        }

        if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
                mask |= X86_BR_IND_JMP;

        if (br_type & PERF_SAMPLE_BRANCH_CALL)
                mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

        if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
                mask |= X86_BR_TYPE_SAVE;

        /*
         * Stash the actual user request into reg; it may
         * be used by fixup code for some CPUs.
         */
        event->hw.branch_reg.reg = mask;

        return 0;
}
/*
 * Set up the HW LBR filter.
 * Used only when available; it may not be enough to disambiguate
 * all branches and may need the help of the SW filter.
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        u64 br_type = event->attr.branch_sample_type;
        u64 mask = 0, v;
        int i;

        for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
                if (!(br_type & (1ULL << i)))
                        continue;

                v = x86_pmu.lbr_sel_map[i];
                if (v == LBR_NOT_SUPP)
                        return -EOPNOTSUPP;

                if (v != LBR_IGN)
                        mask |= v;
        }

        reg = &event->hw.branch_reg;
        reg->idx = EXTRA_REG_LBR;

        if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
                reg->config = mask;
                return 0;
        }

        /*
         * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
         * in suppress mode. So LBR_SELECT should be set to
         * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
         * But the 10th bit LBR_CALL_STACK does not operate
         * in suppress mode.
         */
        reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
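
        /*
         * Worked example (illustrative): requesting conditional branches
         * at both privilege levels gives mask = LBR_KERNEL | LBR_USER |
         * LBR_JCC = 0x7; XORing with 0x1ff (LBR_SEL_MASK minus
         * LBR_CALL_STACK) yields 0x1f8, i.e. every other branch type is
         * marked suppressed while conditionals pass at both rings.
         */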
        if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
            (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
            x86_pmu.lbr_has_info)
                reg->config |= LBR_NO_INFO;

        return 0;
}

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
        int ret = 0;

        /*
         * no LBR on this PMU
         */
        if (!x86_pmu.lbr_nr)
                return -EOPNOTSUPP;

        /*
         * set up the SW LBR filter
         */
        ret = intel_pmu_setup_sw_lbr_filter(event);
        if (ret)
                return ret;

        /*
         * set up the HW LBR filter, if any
         */
        if (x86_pmu.lbr_sel_map)
                ret = intel_pmu_setup_hw_lbr_filter(event);

        return ret;
}
/*
 * Return the type of control flow change at address "from".
 * The instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
        struct insn insn;
        void *addr;
        int bytes_read, bytes_left;
        int ret = X86_BR_NONE;
        int ext, to_plm, from_plm;
        u8 buf[MAX_INSN_SIZE];
        int is64 = 0;

        to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
        from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

        /*
         * maybe zero if the lbr did not fill up after a reset by the time
         * we get a PMU interrupt
         */
        if (from == 0 || to == 0)
                return X86_BR_NONE;

        if (abort)
                return X86_BR_ABORT | to_plm;
        if (from_plm == X86_BR_USER) {
                /*
                 * can happen if measuring at the user level only
                 * and we interrupt in a kernel thread, e.g., idle.
                 */
                if (!current->mm)
                        return X86_BR_NONE;

                /* may fail if text not present */
                bytes_left = copy_from_user_nmi(buf, (void __user *)from,
                                                MAX_INSN_SIZE);
                bytes_read = MAX_INSN_SIZE - bytes_left;
                if (!bytes_read)
                        return X86_BR_NONE;

                addr = buf;
        } else {
                /*
                 * The LBR logs any address in the IP, even if the IP just
                 * faulted. This means userspace can control the from address.
                 * Ensure we don't blindly read any address by validating it is
                 * a known text address.
                 */
                if (kernel_text_address(from)) {
                        addr = (void *)from;
                        /*
                         * Assume we can get the maximum possible size
                         * when grabbing kernel data. This is not
                         * _strictly_ true since we could possibly be
                         * executing up next to a memory hole, but
                         * it is very unlikely to be a problem.
                         */
                        bytes_read = MAX_INSN_SIZE;
                } else {
                        return X86_BR_NONE;
                }
        }

        /*
         * The decoder needs to know the ABI, especially
         * on 64-bit systems running 32-bit apps.
         */
#ifdef CONFIG_X86_64
        is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
#endif
        insn_init(&insn, addr, bytes_read, is64);
        if (insn_get_opcode(&insn))
                return X86_BR_ABORT;
        switch (insn.opcode.bytes[0]) {
        case 0xf:
                switch (insn.opcode.bytes[1]) {
                case 0x05: /* syscall */
                case 0x34: /* sysenter */
                        ret = X86_BR_SYSCALL;
                        break;
                case 0x07: /* sysret */
                case 0x35: /* sysexit */
                        ret = X86_BR_SYSRET;
                        break;
                case 0x80 ... 0x8f: /* conditional */
                        ret = X86_BR_JCC;
                        break;
                default:
                        ret = X86_BR_NONE;
                }
                break;
        case 0x70 ... 0x7f: /* conditional */
                ret = X86_BR_JCC;
                break;
        case 0xc2: /* near ret */
        case 0xc3: /* near ret */
        case 0xca: /* far ret */
        case 0xcb: /* far ret */
                ret = X86_BR_RET;
                break;
        case 0xcf: /* iret */
                ret = X86_BR_IRET;
                break;
        case 0xcc ... 0xce: /* int */
                ret = X86_BR_INT;
                break;
        case 0xe8: /* call near rel */
                if (insn_get_immediate(&insn) || insn.immediate1.value == 0) {
                        /* zero length call */
                        ret = X86_BR_ZERO_CALL;
                        break;
                }
                fallthrough;
        case 0x9a: /* call far absolute */
                ret = X86_BR_CALL;
                break;
        case 0xe0 ... 0xe3: /* loop jmp */
                ret = X86_BR_JCC;
                break;
        case 0xe9 ... 0xeb: /* jmp */
                ret = X86_BR_JMP;
                break;
        case 0xff: /* call near absolute, call far absolute ind */
                if (insn_get_modrm(&insn))
                        return X86_BR_ABORT;

                ext = (insn.modrm.bytes[0] >> 3) & 0x7;
                switch (ext) {
                case 2: /* near ind call */
                case 3: /* far ind call */
                        ret = X86_BR_IND_CALL;
                        break;
                case 4:
                case 5:
                        ret = X86_BR_IND_JMP;
                        break;
                }
                break;
        default:
                ret = X86_BR_NONE;
        }
        /*
         * Interrupts, traps, faults (and thus ring transitions) may
         * occur on any instruction. Thus, to classify them correctly,
         * we need to first look at the from and to priv levels. If they
         * are different and to is in the kernel, then it indicates
         * a ring transition. If the from instruction is not a ring
         * transition instr (syscall, sysenter, int), then it means
         * it was an irq, trap or fault.
         *
         * We have no way of detecting kernel-to-kernel faults.
         */
        if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
            && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
                ret = X86_BR_IRQ;

        /*
         * branch priv level determined by target, as
         * is done by HW when LBR_SELECT is implemented
         */
        if (ret != X86_BR_NONE)
                ret |= to_plm;

        return ret;
}
#define X86_BR_TYPE_MAP_MAX     16

static int branch_map[X86_BR_TYPE_MAP_MAX] = {
        PERF_BR_CALL,           /* X86_BR_CALL */
        PERF_BR_RET,            /* X86_BR_RET */
        PERF_BR_SYSCALL,        /* X86_BR_SYSCALL */
        PERF_BR_SYSRET,         /* X86_BR_SYSRET */
        PERF_BR_UNKNOWN,        /* X86_BR_INT */
        PERF_BR_UNKNOWN,        /* X86_BR_IRET */
        PERF_BR_COND,           /* X86_BR_JCC */
        PERF_BR_UNCOND,         /* X86_BR_JMP */
        PERF_BR_UNKNOWN,        /* X86_BR_IRQ */
        PERF_BR_IND_CALL,       /* X86_BR_IND_CALL */
        PERF_BR_UNKNOWN,        /* X86_BR_ABORT */
        PERF_BR_UNKNOWN,        /* X86_BR_IN_TX */
        PERF_BR_UNKNOWN,        /* X86_BR_NO_TX */
        PERF_BR_CALL,           /* X86_BR_ZERO_CALL */
        PERF_BR_UNKNOWN,        /* X86_BR_CALL_STACK */
        PERF_BR_IND,            /* X86_BR_IND_JMP */
};
static int
common_branch_type(int type)
{
        int i;

        type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */

        if (type) {
                i = __ffs(type);
                if (i < X86_BR_TYPE_MAP_MAX)
                        return branch_map[i];
        }

        return PERF_BR_UNKNOWN;
}
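
/*
 * Worked example (illustrative): X86_BR_JCC is 1 << 8; shifting out the
 * two privilege bits leaves 1 << 6, __ffs() returns 6, and branch_map[6]
 * is PERF_BR_COND.
 */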
enum {
        ARCH_LBR_BR_TYPE_JCC                    = 0,
        ARCH_LBR_BR_TYPE_NEAR_IND_JMP           = 1,
        ARCH_LBR_BR_TYPE_NEAR_REL_JMP           = 2,
        ARCH_LBR_BR_TYPE_NEAR_IND_CALL          = 3,
        ARCH_LBR_BR_TYPE_NEAR_REL_CALL          = 4,
        ARCH_LBR_BR_TYPE_NEAR_RET               = 5,
        ARCH_LBR_BR_TYPE_KNOWN_MAX              = ARCH_LBR_BR_TYPE_NEAR_RET,

        ARCH_LBR_BR_TYPE_MAP_MAX                = 16,
};

static const int arch_lbr_br_type_map[ARCH_LBR_BR_TYPE_MAP_MAX] = {
        [ARCH_LBR_BR_TYPE_JCC]                  = X86_BR_JCC,
        [ARCH_LBR_BR_TYPE_NEAR_IND_JMP]         = X86_BR_IND_JMP,
        [ARCH_LBR_BR_TYPE_NEAR_REL_JMP]         = X86_BR_JMP,
        [ARCH_LBR_BR_TYPE_NEAR_IND_CALL]        = X86_BR_IND_CALL,
        [ARCH_LBR_BR_TYPE_NEAR_REL_CALL]        = X86_BR_CALL,
        [ARCH_LBR_BR_TYPE_NEAR_RET]             = X86_BR_RET,
};
/*
 * Implement the actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
        u64 from, to;
        int br_sel = cpuc->br_sel;
        int i, j, type, to_plm;
        bool compress = false;

        /* if sampling all branches, then nothing to filter */
        if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
            ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
                return;

        for (i = 0; i < cpuc->lbr_stack.nr; i++) {

                from = cpuc->lbr_entries[i].from;
                to = cpuc->lbr_entries[i].to;
                type = cpuc->lbr_entries[i].type;

                /*
                 * Parse the branch type recorded in the LBR_x_INFO MSR.
                 * Doesn't support OTHER_BRANCH decoding for now.
                 * The OTHER_BRANCH branch type still relies on software
                 * decoding.
                 */
                if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
                    type <= ARCH_LBR_BR_TYPE_KNOWN_MAX) {
                        to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
                        type = arch_lbr_br_type_map[type] | to_plm;
                } else
                        type = branch_type(from, to, cpuc->lbr_entries[i].abort);
                if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
                        if (cpuc->lbr_entries[i].in_tx)
                                type |= X86_BR_IN_TX;
                        else
                                type |= X86_BR_NO_TX;
                }

                /* if type does not correspond, then discard */
                if (type == X86_BR_NONE || (br_sel & type) != type) {
                        cpuc->lbr_entries[i].from = 0;
                        compress = true;
                }

                if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
                        cpuc->lbr_entries[i].type = common_branch_type(type);
        }

        if (!compress)
                return;

        /* remove all entries with from = 0 */
        for (i = 0; i < cpuc->lbr_stack.nr; ) {
                if (!cpuc->lbr_entries[i].from) {
                        j = i;
                        while (++j < cpuc->lbr_stack.nr)
                                cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
                        cpuc->lbr_stack.nr--;
                        if (!cpuc->lbr_entries[i].from)
                                continue;
                }
                i++;
        }
}
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        /* Cannot get the TOS for large PEBS and Arch LBR */
        if (static_cpu_has(X86_FEATURE_ARCH_LBR) ||
            (cpuc->n_pebs == cpuc->n_large_pebs))
                cpuc->lbr_stack.hw_idx = -1ULL;
        else
                cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();

        intel_pmu_store_lbr(cpuc, lbr);
        intel_pmu_lbr_filter(cpuc);
}
/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_REL_JMP
                                                | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
         */
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
         LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
        /*
         * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
         */
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL | LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_FAR,
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_FAR,
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = LBR_REL_CALL,
};
static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_FAR,
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_FAR,
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
        [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_RETURN | LBR_CALL_STACK,
        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = LBR_REL_CALL,
};

static int arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = ARCH_LBR_ANY,
        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = ARCH_LBR_USER,
        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = ARCH_LBR_KERNEL,
        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = ARCH_LBR_RETURN |
                                                  ARCH_LBR_OTHER_BRANCH,
        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = ARCH_LBR_REL_CALL |
                                                  ARCH_LBR_IND_CALL |
                                                  ARCH_LBR_OTHER_BRANCH,
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = ARCH_LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = ARCH_LBR_JCC,
        [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = ARCH_LBR_REL_CALL |
                                                  ARCH_LBR_IND_CALL |
                                                  ARCH_LBR_RETURN |
                                                  ARCH_LBR_CALL_STACK,
        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = ARCH_LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = ARCH_LBR_REL_CALL,
};
/* core */
void __init intel_pmu_lbr_init_core(void)
{
        x86_pmu.lbr_nr     = 4;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
        x86_pmu.lbr_nr     = 16;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - workaround LBR_SEL errata (see above)
         * - support syscall, sysret capture.
         *   That requires LBR_FAR, but that means far
         *   jmps need to be filtered out.
         */
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
        x86_pmu.lbr_nr     = 16;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - support syscall, sysret capture.
         *   That requires LBR_FAR, but that means far
         *   jmps need to be filtered out.
         */
}

static inline struct kmem_cache *
create_lbr_kmem_cache(size_t size, size_t align)
{
        return kmem_cache_create("x86_lbr", size, align, 0, NULL);
}
/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
        size_t size = sizeof(struct x86_perf_task_context);

        x86_pmu.lbr_nr   = 16;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

        x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);

        if (lbr_from_signext_quirk_needed())
                static_branch_enable(&lbr_from_quirk_key);
}

/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
        size_t size = sizeof(struct x86_perf_task_context);

        x86_pmu.lbr_nr   = 32;
        x86_pmu.lbr_tos  = MSR_LBR_TOS;
        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
        x86_pmu.lbr_info = MSR_LBR_INFO_0;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

        x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);

        /*
         * SW branch filter usage:
         * - support syscall, sysret capture.
         *   That requires LBR_FAR, but that means far
         *   jmps need to be filtered out.
         */
}
/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
        /*
         * Only models starting at stepping 10 seem
         * to have an operational LBR which can freeze
         * on PMU interrupt.
         */
        if (boot_cpu_data.x86_model == 28
            && boot_cpu_data.x86_stepping < 10) {
                pr_cont("LBR disabled due to erratum");
                return;
        }

        x86_pmu.lbr_nr     = 8;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
}

/* silvermont */
void __init intel_pmu_lbr_init_slm(void)
{
        x86_pmu.lbr_nr     = 8;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

        /*
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
        pr_cont("8-deep LBR, ");
}

/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
        x86_pmu.lbr_nr     = 8;
        x86_pmu.lbr_tos    = MSR_LBR_TOS;
        x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
        x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

        /* Knights Landing does have the MISPREDICT bit */
        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
                x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}
void intel_pmu_lbr_init(void)
{
        switch (x86_pmu.intel_cap.lbr_format) {
        case LBR_FORMAT_EIP_FLAGS2:
                x86_pmu.lbr_has_tsx = 1;
                fallthrough;
        case LBR_FORMAT_EIP_FLAGS:
                x86_pmu.lbr_from_flags = 1;
                break;

        case LBR_FORMAT_INFO:
                x86_pmu.lbr_has_tsx = 1;
                fallthrough;
        case LBR_FORMAT_INFO2:
                x86_pmu.lbr_has_info = 1;
                break;

        case LBR_FORMAT_TIME:
                x86_pmu.lbr_from_flags = 1;
                x86_pmu.lbr_to_cycles = 1;
                break;
        }

        if (x86_pmu.lbr_has_info) {
                /*
                 * Only used in combination with baseline pebs.
                 */
                static_branch_enable(&x86_lbr_mispred);
                static_branch_enable(&x86_lbr_cycles);
        }
}
/*
 * The LBR state size is variable, based on the max number of registers.
 * This calculates the expected state size, which should match
 * what the hardware enumerates for the size of XFEATURE_LBR.
 */
static inline unsigned int get_lbr_state_size(void)
{
        return sizeof(struct arch_lbr_state) +
               x86_pmu.lbr_nr * sizeof(struct lbr_entry);
}
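
/*
 * Rough worked example (illustrative, assuming the arch_lbr_state header
 * is five u64 fields and each lbr_entry is three u64s): a 32-deep LBR
 * gives 40 + 32 * 24 = 808 bytes, which must equal
 * xfeature_size(XFEATURE_LBR) for XSAVES support to stay enabled below.
 */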
static bool is_arch_lbr_xsave_available(void)
{
        if (!boot_cpu_has(X86_FEATURE_XSAVES))
                return false;

        /*
         * Check the LBR state against the corresponding software structure.
         * Disable LBR XSAVES support if the size doesn't match.
         */
        if (xfeature_size(XFEATURE_LBR) == 0)
                return false;

        if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
                return false;

        return true;
}
void __init intel_pmu_arch_lbr_init(void)
{
        struct pmu *pmu = x86_get_pmu(smp_processor_id());
        union cpuid28_eax eax;
        union cpuid28_ebx ebx;
        union cpuid28_ecx ecx;
        unsigned int unused_edx;
        bool arch_lbr_xsave;
        size_t size;
        u64 lbr_nr;

        /* Arch LBR Capabilities */
        cpuid(28, &eax.full, &ebx.full, &ecx.full, &unused_edx);
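
        /*
         * CPUID.1CH:EAX enumerates the supported LBR depths as a bitmask
         * in multiples of 8: e.g. (illustrative) a depth mask of 0xf means
         * depths 8, 16, 24 and 32 are supported, and fls(0xf) * 8 below
         * picks the deepest one, 32.
         */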
        lbr_nr = fls(eax.split.lbr_depth_mask) * 8;
        if (!lbr_nr)
                goto clear_arch_lbr;

        /* Apply the max depth of Arch LBR */
        if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
                goto clear_arch_lbr;

        x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
        x86_pmu.lbr_deep_c_reset = eax.split.lbr_deep_c_reset;
        x86_pmu.lbr_lip = eax.split.lbr_lip;
        x86_pmu.lbr_cpl = ebx.split.lbr_cpl;
        x86_pmu.lbr_filter = ebx.split.lbr_filter;
        x86_pmu.lbr_call_stack = ebx.split.lbr_call_stack;
        x86_pmu.lbr_mispred = ecx.split.lbr_mispred;
        x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;
        x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
        x86_pmu.lbr_nr = lbr_nr;

        if (x86_pmu.lbr_mispred)
                static_branch_enable(&x86_lbr_mispred);
        if (x86_pmu.lbr_timed_lbr)
                static_branch_enable(&x86_lbr_cycles);
        if (x86_pmu.lbr_br_type)
                static_branch_enable(&x86_lbr_type);

        arch_lbr_xsave = is_arch_lbr_xsave_available();
        if (arch_lbr_xsave) {
                size = sizeof(struct x86_perf_task_context_arch_lbr_xsave) +
                       get_lbr_state_size();
                pmu->task_ctx_cache = create_lbr_kmem_cache(size,
                                                            XSAVE_ALIGNMENT);
        }

        if (!pmu->task_ctx_cache) {
                arch_lbr_xsave = false;

                size = sizeof(struct x86_perf_task_context_arch_lbr) +
                       lbr_nr * sizeof(struct lbr_entry);
                pmu->task_ctx_cache = create_lbr_kmem_cache(size, 0);
        }

        x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
        x86_pmu.lbr_to = MSR_ARCH_LBR_TO_0;
        x86_pmu.lbr_info = MSR_ARCH_LBR_INFO_0;

        /* LBR callstack requires both CPL and Branch Filtering support */
        if (!x86_pmu.lbr_cpl ||
            !x86_pmu.lbr_filter ||
            !x86_pmu.lbr_call_stack)
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_NOT_SUPP;

        if (!x86_pmu.lbr_cpl) {
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_NOT_SUPP;
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_NOT_SUPP;
        } else if (!x86_pmu.lbr_filter) {
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_NOT_SUPP;
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_NOT_SUPP;
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_NOT_SUPP;
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_NOT_SUPP;
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_NOT_SUPP;
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_NOT_SUPP;
                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_NOT_SUPP;
        }

        x86_pmu.lbr_ctl_mask = ARCH_LBR_CTL_MASK;
        x86_pmu.lbr_ctl_map  = arch_lbr_ctl_map;

        if (!x86_pmu.lbr_cpl && !x86_pmu.lbr_filter)
                x86_pmu.lbr_ctl_map = NULL;

        x86_pmu.lbr_reset = intel_pmu_arch_lbr_reset;
        if (arch_lbr_xsave) {
                x86_pmu.lbr_save = intel_pmu_arch_lbr_xsaves;
                x86_pmu.lbr_restore = intel_pmu_arch_lbr_xrstors;
                x86_pmu.lbr_read = intel_pmu_arch_lbr_read_xsave;
                pr_cont("XSAVE ");
        } else {
                x86_pmu.lbr_save = intel_pmu_arch_lbr_save;
                x86_pmu.lbr_restore = intel_pmu_arch_lbr_restore;
                x86_pmu.lbr_read = intel_pmu_arch_lbr_read;
        }

        pr_cont("Architectural LBR, ");

        return;

clear_arch_lbr:
        clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
}
/**
 * x86_perf_get_lbr - get the LBR records information
 *
 * @lbr: the caller's memory to store the LBR records information
 *
 * Returns: 0 indicates the LBR info has been successfully obtained
 */
int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
        int lbr_fmt = x86_pmu.intel_cap.lbr_format;

        lbr->nr = x86_pmu.lbr_nr;
        lbr->from = x86_pmu.lbr_from;
        lbr->to = x86_pmu.lbr_to;
        lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;

        return 0;
}
EXPORT_SYMBOL_GPL(x86_perf_get_lbr);

struct event_constraint vlbr_constraint =
        __EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
                          FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);