2 * Intel(R) Processor Trace PMU driver for perf
3 * Copyright (c) 2013-2014, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
15 * Programming Reference:
16 * http://software.intel.com/en-us/intel-isa-extensions
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/types.h>
24 #include <linux/slab.h>
25 #include <linux/device.h>
27 #include <asm/perf_event.h>
30 #include <asm/intel_pt.h>
31 #include <asm/intel-family.h>
33 #include "../perf_event.h"
36 static DEFINE_PER_CPU(struct pt, pt_ctx);
38 static struct pt_pmu pt_pmu;
41 * Capabilities of Intel PT hardware, such as the number of address bits or
42 * supported output schemes, are cached and exported to userspace as the
43 * "caps" attribute group of the pt PMU device
44 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
45 * relevant bits together with intel_pt traces.
47 * These are necessary both for trace decoding (payloads_lip, for instance,
48 * determines the address width encoded in IP-related packets) and for event
49 * configuration (bitmasks with permitted values for certain bit fields).
51 #define PT_CAP(_n, _l, _r, _m) \
52 [PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l, \
53 .reg = _r, .mask = _m }
55 static struct pt_cap_desc {
61 PT_CAP(max_subleaf, 0, CPUID_EAX, 0xffffffff),
62 PT_CAP(cr3_filtering, 0, CPUID_EBX, BIT(0)),
63 PT_CAP(psb_cyc, 0, CPUID_EBX, BIT(1)),
64 PT_CAP(ip_filtering, 0, CPUID_EBX, BIT(2)),
65 PT_CAP(mtc, 0, CPUID_EBX, BIT(3)),
66 PT_CAP(ptwrite, 0, CPUID_EBX, BIT(4)),
67 PT_CAP(power_event_trace, 0, CPUID_EBX, BIT(5)),
68 PT_CAP(topa_output, 0, CPUID_ECX, BIT(0)),
69 PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)),
70 PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)),
71 PT_CAP(output_subsys, 0, CPUID_ECX, BIT(3)),
72 PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)),
73 PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3),
74 PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000),
75 PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff),
76 PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
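/*
 * For illustration: the PT_CAP() macro above turns an entry such as
 * PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000) into roughly
 *
 *	[PT_CAP_mtc_periods] = { .name = "mtc_periods", .leaf = 1,
 *				 .reg = CPUID_EAX, .mask = 0xffff0000 },
 *
 * i.e. "the mtc_periods capability lives in bits 31:16 of EAX of sub-leaf 1
 * of the PT CPUID leaf (0x14)".
 */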
79 u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability)
81 struct pt_cap_desc *cd = &pt_caps[capability];
82 u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
83 unsigned int shift = __ffs(cd->mask);
85 return (c & cd->mask) >> shift;
87 EXPORT_SYMBOL_GPL(intel_pt_validate_cap);
89 u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
91 return intel_pt_validate_cap(pt_pmu.caps, cap);
93 EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);
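/*
 * Worked example: for PT_CAP_psb_periods the descriptor above is
 * { .leaf = 1, .reg = CPUID_EBX, .mask = 0xffff0000 }, so
 * intel_pt_validate_cap() picks the cached EBX value of sub-leaf 1,
 * computes shift = __ffs(0xffff0000) = 16 and returns bits 31:16, which is
 * a bitmask of the supported PSB period encodings. Callers typically test
 * a requested encoding against it:
 *
 *	allowed   = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
 *	requested = (config & RTIT_CTL_PSB_FREQ) >> RTIT_CTL_PSB_FREQ_OFFSET;
 *	if (requested && !(allowed & BIT(requested)))
 *		return false;
 *
 * which is the pattern pt_event_valid() uses below.
 */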
95 static ssize_t pt_cap_show(struct device *cdev,
96 struct device_attribute *attr,
99 struct dev_ext_attribute *ea =
100 container_of(attr, struct dev_ext_attribute, attr);
101 enum pt_capabilities cap = (long)ea->var;
103 return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
106 static struct attribute_group pt_cap_group __ro_after_init = {
110 PMU_FORMAT_ATTR(pt, "config:0" );
111 PMU_FORMAT_ATTR(cyc, "config:1" );
112 PMU_FORMAT_ATTR(pwr_evt, "config:4" );
113 PMU_FORMAT_ATTR(fup_on_ptw, "config:5" );
114 PMU_FORMAT_ATTR(mtc, "config:9" );
115 PMU_FORMAT_ATTR(tsc, "config:10" );
116 PMU_FORMAT_ATTR(noretcomp, "config:11" );
117 PMU_FORMAT_ATTR(ptw, "config:12" );
118 PMU_FORMAT_ATTR(branch, "config:13" );
119 PMU_FORMAT_ATTR(mtc_period, "config:14-17" );
120 PMU_FORMAT_ATTR(cyc_thresh, "config:19-22" );
121 PMU_FORMAT_ATTR(psb_period, "config:24-27" );
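/*
 * These format strings are what the perf tool reads back from
 * /sys/bus/event_source/devices/intel_pt/format/ in order to assemble
 * attr.config from named terms. For example (assuming a perf tool that
 * knows about the intel_pt PMU):
 *
 *	perf record -e intel_pt/tsc=1,mtc=1,mtc_period=3,psb_period=5/u -- ls
 *
 * sets config bits 10 and 9, bits 14-17 to 3 and bits 24-27 to 5, matching
 * the definitions above.
 */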
123 static struct attribute *pt_formats_attr[] = {
124 &format_attr_pt.attr,
125 &format_attr_cyc.attr,
126 &format_attr_pwr_evt.attr,
127 &format_attr_fup_on_ptw.attr,
128 &format_attr_mtc.attr,
129 &format_attr_tsc.attr,
130 &format_attr_noretcomp.attr,
131 &format_attr_ptw.attr,
132 &format_attr_branch.attr,
133 &format_attr_mtc_period.attr,
134 &format_attr_cyc_thresh.attr,
135 &format_attr_psb_period.attr,
139 static struct attribute_group pt_format_group = {
141 .attrs = pt_formats_attr,
145 pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
148 struct perf_pmu_events_attr *pmu_attr =
149 container_of(attr, struct perf_pmu_events_attr, attr);
151 switch (pmu_attr->id) {
153 return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
155 return sprintf(page, "%u:%u\n",
165 PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
166 pt_timing_attr_show);
167 PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
168 pt_timing_attr_show);
170 static struct attribute *pt_timing_attr[] = {
171 &timing_attr_max_nonturbo_ratio.attr.attr,
172 &timing_attr_tsc_art_ratio.attr.attr,
176 static struct attribute_group pt_timing_group = {
177 .attrs = pt_timing_attr,
180 static const struct attribute_group *pt_attr_groups[] = {
187 static int __init pt_pmu_hw_init(void)
189 struct dev_ext_attribute *de_attrs;
190 struct attribute **attrs;
196 rdmsrl(MSR_PLATFORM_INFO, reg);
197 pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;
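/* MSR_PLATFORM_INFO bits 15:8 enumerate the maximum non-turbo ratio */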
200 * If available, read in the TSC to core crystal clock ratio;
201 * otherwise, a zero numerator stands for "not enumerated".
204 if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
205 u32 eax, ebx, ecx, edx;
207 cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);
209 pt_pmu.tsc_art_num = ebx;
210 pt_pmu.tsc_art_den = eax;
213 /* model-specific quirks */
214 switch (boot_cpu_data.x86_model) {
215 case INTEL_FAM6_BROADWELL_CORE:
216 case INTEL_FAM6_BROADWELL_XEON_D:
217 case INTEL_FAM6_BROADWELL_GT3E:
218 case INTEL_FAM6_BROADWELL_X:
219 /* not setting BRANCH_EN will #GP, erratum BDM106 */
220 pt_pmu.branch_en_always_on = true;
226 if (boot_cpu_has(X86_FEATURE_VMX)) {
228 * Intel SDM, 36.5 "Tracing post-VMXON" says that
229 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
232 rdmsrl(MSR_IA32_VMX_MISC, reg);
239 for (i = 0; i < PT_CPUID_LEAVES; i++) {
241 &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
242 &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
243 &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
244 &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
248 size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
249 attrs = kzalloc(size, GFP_KERNEL);
253 size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
254 de_attrs = kzalloc(size, GFP_KERNEL);
258 for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
259 struct dev_ext_attribute *de_attr = de_attrs + i;
261 de_attr->attr.attr.name = pt_caps[i].name;
263 sysfs_attr_init(&de_attr->attr.attr);
265 de_attr->attr.attr.mode = S_IRUGO;
266 de_attr->attr.show = pt_cap_show;
267 de_attr->var = (void *)i;
269 attrs[i] = &de_attr->attr.attr;
272 pt_cap_group.attrs = attrs;
282 #define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC | \
283 RTIT_CTL_CYC_THRESH | \
286 #define RTIT_CTL_MTC (RTIT_CTL_MTC_EN | \
289 #define RTIT_CTL_PTW (RTIT_CTL_PTW_EN | \
293 * Bit 0 (TraceEn) in the attr.config is meaningless as the
294 * corresponding bit in the RTIT_CTL can only be controlled
295 * by the driver; therefore, repurpose it to mean: pass
296 * through the bit that was previously assumed to be always
297 * on for PT, thereby allowing the user to *not* set it if
298 * they so wish. See also pt_event_valid() and pt_config().
300 #define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN
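/*
 * Example of the pass-through semantics: a config of
 * RTIT_CTL_TRACEEN | RTIT_CTL_PTW_EN (bit 0 set, BRANCH_EN clear) requests
 * PTWRITE tracing with branch packets disabled, while a config with bit 0
 * clear keeps the legacy behavior and pt_config() forces BRANCH_EN on.
 */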
302 #define PT_CONFIG_MASK (RTIT_CTL_TRACEEN | \
305 RTIT_CTL_BRANCH_EN | \
308 RTIT_CTL_PWR_EVT_EN | \
309 RTIT_CTL_FUP_ON_PTW | \
312 static bool pt_event_valid(struct perf_event *event)
314 u64 config = event->attr.config;
315 u64 allowed, requested;
317 if ((config & PT_CONFIG_MASK) != config)
320 if (config & RTIT_CTL_CYC_PSB) {
321 if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
324 allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
325 requested = (config & RTIT_CTL_PSB_FREQ) >>
326 RTIT_CTL_PSB_FREQ_OFFSET;
327 if (requested && (!(allowed & BIT(requested))))
330 allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
331 requested = (config & RTIT_CTL_CYC_THRESH) >>
332 RTIT_CTL_CYC_THRESH_OFFSET;
333 if (requested && (!(allowed & BIT(requested))))
337 if (config & RTIT_CTL_MTC) {
339 * In the unlikely case that CPUID lists valid mtc periods,
340 * but not the mtc capability, drop out here.
342 * Spec says that setting mtc period bits while mtc bit in
343 * CPUID is 0 will #GP, so better safe than sorry.
345 if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
348 allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
352 requested = (config & RTIT_CTL_MTC_RANGE) >>
353 RTIT_CTL_MTC_RANGE_OFFSET;
355 if (!(allowed & BIT(requested)))
359 if (config & RTIT_CTL_PWR_EVT_EN &&
360 !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
363 if (config & RTIT_CTL_PTW) {
364 if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
367 /* FUPonPTW without PTW doesn't make sense */
368 if ((config & RTIT_CTL_FUP_ON_PTW) &&
369 !(config & RTIT_CTL_PTW_EN))
374 * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
375 * clears the assumption that BranchEn must always be enabled,
376 * as was the case with the first implementation of PT.
377 * If this bit is not set, the legacy behavior is preserved
378 * for compatibility with older userspace.
380 * Re-using bit 0 for this purpose is fine because it is never
381 * directly set by the user; previous attempts at setting it in
382 * the attr.config resulted in -EINVAL.
384 if (config & RTIT_CTL_PASSTHROUGH) {
386 * Disallow not setting BRANCH_EN where BRANCH_EN is
389 if (pt_pmu.branch_en_always_on &&
390 !(config & RTIT_CTL_BRANCH_EN))
394 * Disallow BRANCH_EN without the PASSTHROUGH.
396 if (config & RTIT_CTL_BRANCH_EN)
404 * PT configuration helpers
405 * These are all CPU-affine and operate on the local PT context.
408 /* Address ranges and their corresponding msr configuration registers */
409 static const struct pt_address_range {
412 unsigned int reg_off;
413 } pt_address_ranges[] = {
415 .msr_a = MSR_IA32_RTIT_ADDR0_A,
416 .msr_b = MSR_IA32_RTIT_ADDR0_B,
417 .reg_off = RTIT_CTL_ADDR0_OFFSET,
420 .msr_a = MSR_IA32_RTIT_ADDR1_A,
421 .msr_b = MSR_IA32_RTIT_ADDR1_B,
422 .reg_off = RTIT_CTL_ADDR1_OFFSET,
425 .msr_a = MSR_IA32_RTIT_ADDR2_A,
426 .msr_b = MSR_IA32_RTIT_ADDR2_B,
427 .reg_off = RTIT_CTL_ADDR2_OFFSET,
430 .msr_a = MSR_IA32_RTIT_ADDR3_A,
431 .msr_b = MSR_IA32_RTIT_ADDR3_B,
432 .reg_off = RTIT_CTL_ADDR3_OFFSET,
436 static u64 pt_config_filters(struct perf_event *event)
438 struct pt_filters *filters = event->hw.addr_filters;
439 struct pt *pt = this_cpu_ptr(&pt_ctx);
440 unsigned int range = 0;
446 perf_event_addr_filters_sync(event);
448 for (range = 0; range < filters->nr_filters; range++) {
449 struct pt_filter *filter = &filters->filter[range];
452 * Note, if the range has zero start/end addresses due
453 * to its dynamic object not being loaded yet, we just
454 * go ahead and program a zeroed range, which will simply
455 * produce no data. Note^2: if executable code at 0x0
456 * is a concern, we can set up an "invalid" configuration
457 * such as msr_b < msr_a.
460 /* avoid redundant msr writes */
461 if (pt->filters.filter[range].msr_a != filter->msr_a) {
462 wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
463 pt->filters.filter[range].msr_a = filter->msr_a;
466 if (pt->filters.filter[range].msr_b != filter->msr_b) {
467 wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
468 pt->filters.filter[range].msr_b = filter->msr_b;
471 rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
477 static void pt_config(struct perf_event *event)
479 struct pt *pt = this_cpu_ptr(&pt_ctx);
482 /* First round: clear STATUS, in particular the PSB byte counter. */
483 if (!event->hw.config) {
484 perf_event_itrace_started(event);
485 wrmsrl(MSR_IA32_RTIT_STATUS, 0);
488 reg = pt_config_filters(event);
489 reg |= RTIT_CTL_TOPA | RTIT_CTL_TRACEEN;
492 * Previously, we had BRANCH_EN on by default, but now that PT has
493 * grown features outside of branch tracing, it is useful to allow
494 * the user to disable it. Setting bit 0 in the event's attr.config
495 * allows BRANCH_EN to pass through instead of being always on. See
496 * also the comment in pt_event_valid().
498 if (event->attr.config & BIT(0)) {
499 reg |= event->attr.config & RTIT_CTL_BRANCH_EN;
501 reg |= RTIT_CTL_BRANCH_EN;
504 if (!event->attr.exclude_kernel)
506 if (!event->attr.exclude_user)
509 reg |= (event->attr.config & PT_CONFIG_MASK);
511 event->hw.config = reg;
512 if (READ_ONCE(pt->vmx_on))
513 perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
515 wrmsrl(MSR_IA32_RTIT_CTL, reg);
518 static void pt_config_stop(struct perf_event *event)
520 struct pt *pt = this_cpu_ptr(&pt_ctx);
521 u64 ctl = READ_ONCE(event->hw.config);
523 /* may be already stopped by a PMI */
524 if (!(ctl & RTIT_CTL_TRACEEN))
527 ctl &= ~RTIT_CTL_TRACEEN;
528 if (!READ_ONCE(pt->vmx_on))
529 wrmsrl(MSR_IA32_RTIT_CTL, ctl);
531 WRITE_ONCE(event->hw.config, ctl);
534 * A wrmsr that disables trace generation serializes other PT
535 * registers and causes all data packets to be written to memory,
536 * but a fence is required for the data to become globally visible.
538 * The WMB below, separating the data store and the aux_head store, matches
539 * the consumer's RMB that separates the aux_head load and the data load.
544 static void pt_config_buffer(void *buf, unsigned int topa_idx,
545 unsigned int output_off)
549 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));
551 reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);
553 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
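/*
 * As this driver uses it, MSR_IA32_RTIT_OUTPUT_MASK keeps bits 6:0 at 0x7f,
 * holds the index of the current ToPA entry in bits 31:7 and the byte
 * offset into the current output region in bits 63:32; e.g. topa_idx == 2
 * with output_off == 0 yields 0x17f. pt_read_offset() below decodes the
 * same layout on the way back.
 */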
557 * Keep ToPA table-related metadata on the same page as the actual table,
558 * taking up a few words from the top
561 #define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
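/*
 * Back-of-the-envelope, assuming 4KiB pages and 8-byte ToPA entries:
 * (4096 - 40) / 8 - 1 = 506 entries per table, with one slot kept for the
 * END link and the top of the page used for the struct topa bookkeeping
 * fields below.
 */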
564 * struct topa - page-sized ToPA table with metadata at the top
565 * @table: actual ToPA table entries, as understood by PT hardware
566 * @list: linkage to struct pt_buffer's list of tables
567 * @phys: physical address of this page
568 * @offset: offset of the first entry in this table in the buffer
569 * @size: total size of all entries in this table
570 * @last: index of the last initialized entry in this table
573 struct topa_entry table[TENTS_PER_PAGE];
574 struct list_head list;
581 /* make -1 stand for the last table entry */
582 #define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
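/*
 * TOPA_ENTRY(t, -1) therefore names the last initialized entry of table @t;
 * the insertion paths below use it both to fill in the most recently added
 * entry and to append END links.
 */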
585 * topa_alloc() - allocate page-sized ToPA table
586 * @cpu: CPU on which to allocate.
587 * @gfp: Allocation flags.
589 * Return: On success, return the pointer to ToPA table page.
591 static struct topa *topa_alloc(int cpu, gfp_t gfp)
593 int node = cpu_to_node(cpu);
597 p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
601 topa = page_address(p);
603 topa->phys = page_to_phys(p);
606 * In case of single-entry ToPA, always put the self-referencing END
607 * link as the 2nd entry in the table.
609 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
610 TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
611 TOPA_ENTRY(topa, 1)->end = 1;
618 * topa_free() - free a page-sized ToPA table
619 * @topa: Table to deallocate.
621 static void topa_free(struct topa *topa)
623 free_page((unsigned long)topa);
627 * topa_insert_table() - insert a ToPA table into a buffer
628 * @buf: PT buffer that's being extended.
629 * @topa: New topa table to be inserted.
631 * If it's the first table in this buffer, set up buffer's pointers
632 * accordingly; otherwise, add an END=1 link entry pointing to @topa to the current
633 * "last" table and adjust the last table pointer to @topa.
635 static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
637 struct topa *last = buf->last;
639 list_add_tail(&topa->list, &buf->tables);
642 buf->first = buf->last = buf->cur = topa;
646 topa->offset = last->offset + last->size;
649 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
652 BUG_ON(last->last != TENTS_PER_PAGE - 1);
654 TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
655 TOPA_ENTRY(last, -1)->end = 1;
659 * topa_table_full() - check if a ToPA table is filled up
662 static bool topa_table_full(struct topa *topa)
664 /* single-entry ToPA is a special case */
665 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
668 return topa->last == TENTS_PER_PAGE - 1;
672 * topa_insert_pages() - create a list of ToPA tables
673 * @buf: PT buffer being initialized.
674 * @gfp: Allocation flags.
676 * This initializes a list of ToPA tables with entries from
677 * the data_pages provided by rb_alloc_aux().
679 * Return: 0 on success or error code.
681 static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
683 struct topa *topa = buf->last;
687 p = virt_to_page(buf->data_pages[buf->nr_pages]);
689 order = page_private(p);
691 if (topa_table_full(topa)) {
692 topa = topa_alloc(buf->cpu, gfp);
696 topa_insert_table(buf, topa);
699 TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
700 TOPA_ENTRY(topa, -1)->size = order;
701 if (!buf->snapshot &&
702 !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
703 TOPA_ENTRY(topa, -1)->intr = 1;
704 TOPA_ENTRY(topa, -1)->stop = 1;
708 topa->size += sizes(order);
710 buf->nr_pages += 1ul << order;
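/*
 * sizes(order) is the byte size of a region with ToPA size encoding @order,
 * i.e. PAGE_SIZE << order, consistent with the nr_pages accounting above.
 */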
716 * pt_topa_dump() - print ToPA tables and their entries
719 static void pt_topa_dump(struct pt_buffer *buf)
723 list_for_each_entry(topa, &buf->tables, list) {
726 pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
727 topa->phys, topa->offset, topa->size);
728 for (i = 0; i < TENTS_PER_PAGE; i++) {
729 pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
731 (unsigned long)topa->table[i].base << TOPA_SHIFT,
732 sizes(topa->table[i].size),
733 topa->table[i].end ? 'E' : ' ',
734 topa->table[i].intr ? 'I' : ' ',
735 topa->table[i].stop ? 'S' : ' ',
736 *(u64 *)&topa->table[i]);
737 if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
738 topa->table[i].stop) ||
746 * pt_buffer_advance() - advance to the next output region
749 * Advance the current pointers in the buffer to the next ToPA entry.
751 static void pt_buffer_advance(struct pt_buffer *buf)
756 if (buf->cur_idx == buf->cur->last) {
757 if (buf->cur == buf->last)
758 buf->cur = buf->first;
760 buf->cur = list_entry(buf->cur->list.next, struct topa,
767 * pt_update_head() - calculate current offsets and sizes
768 * @pt: Per-cpu pt context.
770 * Update buffer's current write pointer position and data size.
772 static void pt_update_head(struct pt *pt)
774 struct pt_buffer *buf = perf_get_aux(&pt->handle);
775 u64 topa_idx, base, old;
777 /* offset of the first region in this table from the beginning of buf */
778 base = buf->cur->offset + buf->output_off;
780 /* offset of the current output region within this table */
781 for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
782 base += sizes(buf->cur->table[topa_idx].size);
785 local_set(&buf->data_size, base);
787 old = (local64_xchg(&buf->head, base) &
788 ((buf->nr_pages << PAGE_SHIFT) - 1));
790 base += buf->nr_pages << PAGE_SHIFT;
792 local_add(base - old, &buf->data_size);
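/*
 * Illustration of the wraparound handling above: with an 8-page (32KiB)
 * buffer, an old masked head of 28KiB and a new write pointer of 4KiB, the
 * hardware has wrapped, so base is bumped to 36KiB and data_size grows by
 * the 8KiB that were actually written.
 */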
797 * pt_buffer_region() - obtain current output region's address
800 static void *pt_buffer_region(struct pt_buffer *buf)
802 return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
806 * pt_buffer_region_size() - obtain current output region's size
809 static size_t pt_buffer_region_size(struct pt_buffer *buf)
811 return sizes(buf->cur->table[buf->cur_idx].size);
815 * pt_handle_status() - take care of possible status conditions
816 * @pt: Per-cpu pt context.
818 static void pt_handle_status(struct pt *pt)
820 struct pt_buffer *buf = perf_get_aux(&pt->handle);
824 rdmsrl(MSR_IA32_RTIT_STATUS, status);
826 if (status & RTIT_STATUS_ERROR) {
827 pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
829 status &= ~RTIT_STATUS_ERROR;
832 if (status & RTIT_STATUS_STOPPED) {
833 status &= ~RTIT_STATUS_STOPPED;
836 * On systems that only do single-entry ToPA, hitting STOP
837 * means we are already losing data; need to let the decoder
840 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
841 buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
842 perf_aux_output_flag(&pt->handle,
843 PERF_AUX_FLAG_TRUNCATED);
849 * Also on single-entry ToPA implementations, interrupt will come
850 * before the output reaches its output region's boundary.
852 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
854 pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
855 void *head = pt_buffer_region(buf);
857 /* everything within this margin needs to be zeroed out */
858 memset(head + buf->output_off, 0,
859 pt_buffer_region_size(buf) -
865 pt_buffer_advance(buf);
867 wrmsrl(MSR_IA32_RTIT_STATUS, status);
871 * pt_read_offset() - translate registers into buffer pointers
874 * Set buffer's output pointers from MSR values.
876 static void pt_read_offset(struct pt_buffer *buf)
878 u64 offset, base_topa;
880 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
881 buf->cur = phys_to_virt(base_topa);
883 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
884 /* offset within current output region */
885 buf->output_off = offset >> 32;
886 /* index of current output region within this table */
887 buf->cur_idx = (offset & 0xffffff80) >> 7;
891 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
893 * @pg: Page offset in the buffer.
895 * When advancing to the next output region (ToPA entry), given a page offset
896 * into the buffer, we need to find the offset of the first page in the next
899 static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
901 struct topa_entry *te = buf->topa_index[pg];
904 if (buf->first == buf->last && buf->first->last == 1)
909 pg &= buf->nr_pages - 1;
910 } while (buf->topa_index[pg] == te);
916 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
918 * @handle: Current output handle.
920 * Place INT and STOP marks to prevent overwriting old data that the consumer
921 * hasn't yet collected, and to wake up the consumer after a certain fraction
922 * of the buffer has filled up. Only needed and sensible for non-snapshot counters.
924 * This obviously relies on buf::head to figure out buffer markers, so it has
925 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
928 static int pt_buffer_reset_markers(struct pt_buffer *buf,
929 struct perf_output_handle *handle)
932 unsigned long head = local64_read(&buf->head);
933 unsigned long idx, npages, wakeup;
935 /* can't stop in the middle of an output region */
936 if (buf->output_off + handle->size + 1 <
937 sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
938 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
943 /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
944 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
947 /* clear STOP and INT from current entry */
948 buf->topa_index[buf->stop_pos]->stop = 0;
949 buf->topa_index[buf->stop_pos]->intr = 0;
950 buf->topa_index[buf->intr_pos]->intr = 0;
952 /* how many pages till the STOP marker */
953 npages = handle->size >> PAGE_SHIFT;
955 /* if it's on a page boundary, fill up one more page */
956 if (!offset_in_page(head + handle->size + 1))
959 idx = (head >> PAGE_SHIFT) + npages;
960 idx &= buf->nr_pages - 1;
963 wakeup = handle->wakeup >> PAGE_SHIFT;
965 /* in the worst case, wake up the consumer one page before hard stop */
966 idx = (head >> PAGE_SHIFT) + npages - 1;
970 idx &= buf->nr_pages - 1;
973 buf->topa_index[buf->stop_pos]->stop = 1;
974 buf->topa_index[buf->stop_pos]->intr = 1;
975 buf->topa_index[buf->intr_pos]->intr = 1;
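/*
 * For illustration: with head at 0x3000 and handle->size == 0x4fff,
 * head + size + 1 lands on a page boundary, so npages becomes 5 and the
 * STOP/INT pair is placed on page 8, the first page the consumer has not
 * yet made available; the wakeup INT mark goes on the wakeup page, at the
 * latest one page before the hard stop.
 */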
981 * pt_buffer_setup_topa_index() - build topa_index[] table of regions
984 * topa_index[] maps each page offset within the buffer to the output region
985 * (ToPA entry) that covers it, for purposes of quick reverse lookup.
987 static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
989 struct topa *cur = buf->first, *prev = buf->last;
990 struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
991 *te_prev = TOPA_ENTRY(prev, prev->last - 1);
994 while (pg < buf->nr_pages) {
997 /* pages within one topa entry */
998 for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
999 buf->topa_index[pg] = te_prev;
1003 if (idx == cur->last - 1) {
1004 /* advance to next topa table */
1006 cur = list_entry(cur->list.next, struct topa, list);
1010 te_cur = TOPA_ENTRY(cur, idx);
1016 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
1018 * @head: Write pointer (aux_head) from AUX buffer.
1020 * Find the ToPA table and entry corresponding to given @head and set buffer's
1021 * "current" pointers accordingly. This is done after we have obtained the
1022 * current aux_head position from a successful call to perf_aux_output_begin()
1023 * to make sure the hardware is writing to the right place.
1025 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
1026 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
1027 * which are used to determine INT and STOP markers' locations by a subsequent
1028 * call to pt_buffer_reset_markers().
1030 static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
1035 head &= (buf->nr_pages << PAGE_SHIFT) - 1;
1037 pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
1038 pg = pt_topa_next_entry(buf, pg);
1040 buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
1041 buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
1042 (unsigned long)buf->cur) / sizeof(struct topa_entry);
1043 buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);
1045 local64_set(&buf->head, head);
1046 local_set(&buf->data_size, 0);
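/*
 * The arithmetic above relies on each struct topa occupying exactly one
 * page with its entry array at offset 0: masking the topa_index[] pointer
 * with PAGE_MASK recovers the table, and the pointer difference divided by
 * sizeof(struct topa_entry) recovers the entry index within it.
 */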
1050 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
1053 static void pt_buffer_fini_topa(struct pt_buffer *buf)
1055 struct topa *topa, *iter;
1057 list_for_each_entry_safe(topa, iter, &buf->tables, list) {
1059 * right now, this is in free_aux() path only, so
1060 * no need to unlink this table from the list
1067 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
1069 * @size: Total size of all regions within this ToPA.
1070 * @gfp: Allocation flags.
1072 static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
1078 topa = topa_alloc(buf->cpu, gfp);
1082 topa_insert_table(buf, topa);
1084 while (buf->nr_pages < nr_pages) {
1085 err = topa_insert_pages(buf, gfp);
1087 pt_buffer_fini_topa(buf);
1092 pt_buffer_setup_topa_index(buf);
1094 /* link last table to the first one, unless we're double buffering */
1095 if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
1096 TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
1097 TOPA_ENTRY(buf->last, -1)->end = 1;
1105 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
1106 * @cpu: Cpu on which to allocate, -1 means current.
1107 * @pages: Array of pointers to buffer pages passed from perf core.
1108 * @nr_pages: Number of pages in the buffer.
1109 * @snapshot: If this is a snapshot/overwrite counter.
1111 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
1112 * bookkeeping for an AUX buffer.
1114 * Return: Our private PT buffer structure.
1117 pt_buffer_setup_aux(struct perf_event *event, void **pages,
1118 int nr_pages, bool snapshot)
1120 struct pt_buffer *buf;
1121 int node, ret, cpu = event->cpu;
1127 cpu = raw_smp_processor_id();
1128 node = cpu_to_node(cpu);
1130 buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
1136 buf->snapshot = snapshot;
1137 buf->data_pages = pages;
1139 INIT_LIST_HEAD(&buf->tables);
1141 ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
1151 * pt_buffer_free_aux() - perf AUX deallocation path callback
1154 static void pt_buffer_free_aux(void *data)
1156 struct pt_buffer *buf = data;
1158 pt_buffer_fini_topa(buf);
1162 static int pt_addr_filters_init(struct perf_event *event)
1164 struct pt_filters *filters;
1165 int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
1167 if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
1170 filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
1175 memcpy(filters, event->parent->hw.addr_filters,
1178 event->hw.addr_filters = filters;
1183 static void pt_addr_filters_fini(struct perf_event *event)
1185 kfree(event->hw.addr_filters);
1186 event->hw.addr_filters = NULL;
1189 static inline bool valid_kernel_ip(unsigned long ip)
1191 return virt_addr_valid(ip) && kernel_ip(ip);
1194 static int pt_event_addr_filters_validate(struct list_head *filters)
1196 struct perf_addr_filter *filter;
1199 list_for_each_entry(filter, filters, entry) {
1201 * PT doesn't support single address triggers and
1204 if (!filter->size ||
1205 filter->action == PERF_ADDR_FILTER_ACTION_START)
1208 if (!filter->path.dentry) {
1209 if (!valid_kernel_ip(filter->offset))
1212 if (!valid_kernel_ip(filter->offset + filter->size))
1216 if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
1223 static void pt_event_addr_filters_sync(struct perf_event *event)
1225 struct perf_addr_filters_head *head = perf_event_addr_filters(event);
1226 unsigned long msr_a, msr_b;
1227 struct perf_addr_filter_range *fr = event->addr_filter_ranges;
1228 struct pt_filters *filters = event->hw.addr_filters;
1229 struct perf_addr_filter *filter;
1235 list_for_each_entry(filter, &head->list, entry) {
1236 if (filter->path.dentry && !fr[range].start) {
1239 /* apply the offset */
1240 msr_a = fr[range].start;
1241 msr_b = msr_a + fr[range].size - 1;
1244 filters->filter[range].msr_a = msr_a;
1245 filters->filter[range].msr_b = msr_b;
1246 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER)
1247 filters->filter[range].config = 1;
1249 filters->filter[range].config = 2;
1253 filters->nr_filters = range;
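/*
 * The per-range config values written above follow the RTIT_CTL ADDRn_CFG
 * encoding: 1 makes the range a trace filter, 2 makes it a TraceStop
 * region, and 0 leaves it unused; pt_config_filters() shifts each value
 * into place via pt_address_ranges[range].reg_off.
 */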
1257 * intel_pt_interrupt() - PT PMI handler
1259 void intel_pt_interrupt(void)
1261 struct pt *pt = this_cpu_ptr(&pt_ctx);
1262 struct pt_buffer *buf;
1263 struct perf_event *event = pt->handle.event;
1266 * There may be a dangling PT bit in the interrupt status register
1267 * after PT has been disabled by pt_event_stop(). Make sure we don't
1268 * do anything (particularly, re-enable) for this event here.
1270 if (!READ_ONCE(pt->handle_nmi))
1276 pt_config_stop(event);
1278 buf = perf_get_aux(&pt->handle);
1282 pt_read_offset(buf);
1284 pt_handle_status(pt);
1288 perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
1290 if (!event->hw.state) {
1293 buf = perf_aux_output_begin(&pt->handle, event);
1295 event->hw.state = PERF_HES_STOPPED;
1299 pt_buffer_reset_offsets(buf, pt->handle.head);
1300 /* snapshot counters don't use PMI, so it's safe */
1301 ret = pt_buffer_reset_markers(buf, &pt->handle);
1303 perf_aux_output_end(&pt->handle, 0);
1307 pt_config_buffer(buf->cur->table, buf->cur_idx,
1313 void intel_pt_handle_vmx(int on)
1315 struct pt *pt = this_cpu_ptr(&pt_ctx);
1316 struct perf_event *event;
1317 unsigned long flags;
1319 /* PT plays nice with VMX, do nothing */
1324 * VMXON will clear RTIT_CTL.TraceEn; we need to make
1325 * sure to not try to set it while VMX is on. Disable
1326 * interrupts to avoid racing with pmu callbacks;
1327 * concurrent PMI should be handled fine.
1329 local_irq_save(flags);
1330 WRITE_ONCE(pt->vmx_on, on);
1333 * If an AUX transaction is in progress, it will contain
1334 * gap(s), so flag it PARTIAL to inform the user.
1336 event = pt->handle.event;
1338 perf_aux_output_flag(&pt->handle,
1339 PERF_AUX_FLAG_PARTIAL);
1341 /* Turn PTs back on */
1343 wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config);
1345 local_irq_restore(flags);
1347 EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
1353 static void pt_event_start(struct perf_event *event, int mode)
1355 struct hw_perf_event *hwc = &event->hw;
1356 struct pt *pt = this_cpu_ptr(&pt_ctx);
1357 struct pt_buffer *buf;
1359 buf = perf_aux_output_begin(&pt->handle, event);
1363 pt_buffer_reset_offsets(buf, pt->handle.head);
1364 if (!buf->snapshot) {
1365 if (pt_buffer_reset_markers(buf, &pt->handle))
1369 WRITE_ONCE(pt->handle_nmi, 1);
1372 pt_config_buffer(buf->cur->table, buf->cur_idx,
1379 perf_aux_output_end(&pt->handle, 0);
1381 hwc->state = PERF_HES_STOPPED;
1384 static void pt_event_stop(struct perf_event *event, int mode)
1386 struct pt *pt = this_cpu_ptr(&pt_ctx);
1389 * Protect against the PMI racing with disabling wrmsr,
1390 * see comment in intel_pt_interrupt().
1392 WRITE_ONCE(pt->handle_nmi, 0);
1394 pt_config_stop(event);
1396 if (event->hw.state == PERF_HES_STOPPED)
1399 event->hw.state = PERF_HES_STOPPED;
1401 if (mode & PERF_EF_UPDATE) {
1402 struct pt_buffer *buf = perf_get_aux(&pt->handle);
1407 if (WARN_ON_ONCE(pt->handle.event != event))
1410 pt_read_offset(buf);
1412 pt_handle_status(pt);
1418 local_xchg(&buf->data_size,
1419 buf->nr_pages << PAGE_SHIFT);
1420 perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
1424 static void pt_event_del(struct perf_event *event, int mode)
1426 pt_event_stop(event, PERF_EF_UPDATE);
1429 static int pt_event_add(struct perf_event *event, int mode)
1431 struct pt *pt = this_cpu_ptr(&pt_ctx);
1432 struct hw_perf_event *hwc = &event->hw;
1435 if (pt->handle.event)
1438 if (mode & PERF_EF_START) {
1439 pt_event_start(event, 0);
1441 if (hwc->state == PERF_HES_STOPPED)
1444 hwc->state = PERF_HES_STOPPED;
1453 static void pt_event_read(struct perf_event *event)
1457 static void pt_event_destroy(struct perf_event *event)
1459 pt_addr_filters_fini(event);
1460 x86_del_exclusive(x86_lbr_exclusive_pt);
1463 static int pt_event_init(struct perf_event *event)
1465 if (event->attr.type != pt_pmu.pmu.type)
1468 if (!pt_event_valid(event))
1471 if (x86_add_exclusive(x86_lbr_exclusive_pt))
1474 if (pt_addr_filters_init(event)) {
1475 x86_del_exclusive(x86_lbr_exclusive_pt);
1479 event->destroy = pt_event_destroy;
1484 void cpu_emergency_stop_pt(void)
1486 struct pt *pt = this_cpu_ptr(&pt_ctx);
1488 if (pt->handle.event)
1489 pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
1492 static __init int pt_init(void)
1494 int ret, cpu, prior_warn = 0;
1496 BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);
1498 if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
1502 for_each_online_cpu(cpu) {
1505 ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
1506 if (!ret && (ctl & RTIT_CTL_TRACEEN))
1512 x86_add_exclusive(x86_lbr_exclusive_pt);
1513 pr_warn("PT is enabled at boot time, doing nothing\n");
1518 ret = pt_pmu_hw_init();
1522 if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
1523 pr_warn("ToPA output is not supported on this CPU\n");
1527 if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
1528 pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
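/*
 * PERF_PMU_CAP_AUX_NO_SG (set above when multiple-entry ToPA is absent)
 * asks the perf core for a physically contiguous AUX buffer, since the
 * hardware can then only use a single output region.
 */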
1530 pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
1531 pt_pmu.pmu.attr_groups = pt_attr_groups;
1532 pt_pmu.pmu.task_ctx_nr = perf_sw_context;
1533 pt_pmu.pmu.event_init = pt_event_init;
1534 pt_pmu.pmu.add = pt_event_add;
1535 pt_pmu.pmu.del = pt_event_del;
1536 pt_pmu.pmu.start = pt_event_start;
1537 pt_pmu.pmu.stop = pt_event_stop;
1538 pt_pmu.pmu.read = pt_event_read;
1539 pt_pmu.pmu.setup_aux = pt_buffer_setup_aux;
1540 pt_pmu.pmu.free_aux = pt_buffer_free_aux;
1541 pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync;
1542 pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
1543 pt_pmu.pmu.nr_addr_filters =
1544 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);
1546 ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);
1550 arch_initcall(pt_init);