`perf kvm stat` supports record and report options.
Because the code lives in the arch directory, a report for a different
machine type cannot be supported.
Move the kvm-stat code out of the arch directory and into
util/kvm-stat-arch following the pattern of perf-regs and dwarf-regs.
Avoid duplicate symbols by renaming functions to have the architecture
name within them.
For global variables, wrap them in an architecture specific function.
The architecture to use with `perf kvm stat` is selected by
EM_HOST, ie no different than before the change.
Later the ELF machine can be determined from the session or a header
feature (ie EM_HOST at the time of the record).
The HAVE_KVM_STAT_SUPPORT build flag and #define are now redundant, so
remove them across the Makefiles and the build.
Opportunistically constify architectural structs and arrays.
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Aditya Bodkhe <aditya.b1@linux.ibm.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrew Jones <ajones@ventanamicro.com>
Cc: Anubhav Shelat <ashelat@redhat.com>
Cc: Anup Patel <anup@brainfault.org>
Cc: Athira Rajeev <atrajeev@linux.ibm.com>
Cc: Blake Jones <blakejones@google.com>
Cc: Chun-Tse Shao <ctshao@google.com>
Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <pjw@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quan Zhou <zhouquan@iscas.ac.cn>
Cc: Shimin Guo <shimin.guo@skydio.com>
Cc: Swapnil Sapkal <swapnil.sapkal@amd.com>
Cc: Thomas Falcon <thomas.falcon@intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yunseong Kim <ysk@kzalloc.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
endif
endif
-ifdef HAVE_KVM_STAT_SUPPORT
- CFLAGS += -DHAVE_KVM_STAT_SUPPORT
-endif
-
ifeq (${IS_64_BIT}, 1)
ifndef NO_PERF_READ_VDSO32
$(call feature_check,compile-32)
# SPDX-License-Identifier: GPL-2.0
PERF_HAVE_JITDUMP := 1
-HAVE_KVM_STAT_SUPPORT := 1
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-util-y += ../../arm/util/auxtrace.o
perf-util-y += ../../arm/util/cs-etm.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-#ifndef ARCH_PERF_ARM64_EXCEPTION_TYPES_H
-#define ARCH_PERF_ARM64_EXCEPTION_TYPES_H
-
-/* Per asm/virt.h */
-#define HVC_STUB_ERR 0xbadca11
-
-/* Per asm/kvm_asm.h */
-#define ARM_EXCEPTION_IRQ 0
-#define ARM_EXCEPTION_EL1_SERROR 1
-#define ARM_EXCEPTION_TRAP 2
-#define ARM_EXCEPTION_IL 3
-/* The hyp-stub will return this for any kvm_call_hyp() call */
-#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
-
-#define kvm_arm_exception_type \
- {ARM_EXCEPTION_IRQ, "IRQ" }, \
- {ARM_EXCEPTION_EL1_SERROR, "SERROR" }, \
- {ARM_EXCEPTION_TRAP, "TRAP" }, \
- {ARM_EXCEPTION_IL, "ILLEGAL" }, \
- {ARM_EXCEPTION_HYP_GONE, "HYP_GONE" }
-
-/* Per asm/esr.h */
-#define ESR_ELx_EC_UNKNOWN (0x00)
-#define ESR_ELx_EC_WFx (0x01)
-/* Unallocated EC: 0x02 */
-#define ESR_ELx_EC_CP15_32 (0x03)
-#define ESR_ELx_EC_CP15_64 (0x04)
-#define ESR_ELx_EC_CP14_MR (0x05)
-#define ESR_ELx_EC_CP14_LS (0x06)
-#define ESR_ELx_EC_FP_ASIMD (0x07)
-#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
-#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
-#define ESR_ELx_EC_OTHER (0x0A)
-/* Unallocated EC: 0x0B */
-#define ESR_ELx_EC_CP14_64 (0x0C)
-#define ESR_ELx_EC_BTI (0x0D)
-#define ESR_ELx_EC_ILL (0x0E)
-/* Unallocated EC: 0x0F - 0x10 */
-#define ESR_ELx_EC_SVC32 (0x11)
-#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
-#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
-/* Unallocated EC: 0x14 */
-#define ESR_ELx_EC_SVC64 (0x15)
-#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
-#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
-#define ESR_ELx_EC_SYS64 (0x18)
-#define ESR_ELx_EC_SVE (0x19)
-#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
-/* Unallocated EC: 0x1B */
-#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
-#define ESR_ELx_EC_SME (0x1D)
-/* Unallocated EC: 0x1E */
-#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
-#define ESR_ELx_EC_IABT_LOW (0x20)
-#define ESR_ELx_EC_IABT_CUR (0x21)
-#define ESR_ELx_EC_PC_ALIGN (0x22)
-/* Unallocated EC: 0x23 */
-#define ESR_ELx_EC_DABT_LOW (0x24)
-#define ESR_ELx_EC_DABT_CUR (0x25)
-#define ESR_ELx_EC_SP_ALIGN (0x26)
-#define ESR_ELx_EC_MOPS (0x27)
-#define ESR_ELx_EC_FP_EXC32 (0x28)
-/* Unallocated EC: 0x29 - 0x2B */
-#define ESR_ELx_EC_FP_EXC64 (0x2C)
-#define ESR_ELx_EC_GCS (0x2D)
-/* Unallocated EC: 0x2E */
-#define ESR_ELx_EC_SERROR (0x2F)
-#define ESR_ELx_EC_BREAKPT_LOW (0x30)
-#define ESR_ELx_EC_BREAKPT_CUR (0x31)
-#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
-#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
-#define ESR_ELx_EC_WATCHPT_LOW (0x34)
-#define ESR_ELx_EC_WATCHPT_CUR (0x35)
-/* Unallocated EC: 0x36 - 0x37 */
-#define ESR_ELx_EC_BKPT32 (0x38)
-/* Unallocated EC: 0x39 */
-#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
-/* Unallocated EC: 0x3B */
-#define ESR_ELx_EC_BRK64 (0x3C)
-/* Unallocated EC: 0x3D - 0x3F */
-#define ESR_ELx_EC_MAX (0x3F)
-
-#define ECN(x) { ESR_ELx_EC_##x, #x }
-
-#define kvm_arm_exception_class \
- ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
- ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(PAC), ECN(CP14_64), \
- ECN(SVC64), ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(SVE), \
- ECN(IMP_DEF), ECN(IABT_LOW), ECN(IABT_CUR), \
- ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
- ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
- ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
- ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
- ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
-
-#endif /* ARCH_PERF_ARM64_EXCEPTION_TYPES_H */
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-#include <errno.h>
-#include <memory.h>
-#include "../../../util/evsel.h"
-#include "../../../util/kvm-stat.h"
-#include "arm64_exception_types.h"
-#include "debug.h"
-
-define_exit_reasons_table(arm64_exit_reasons, kvm_arm_exception_type);
-define_exit_reasons_table(arm64_trap_exit_reasons, kvm_arm_exception_class);
-
-const char *kvm_trap_exit_reason = "esr_ec";
-const char *vcpu_id_str = "id";
-const char *kvm_exit_reason = "ret";
-const char *kvm_entry_trace = "kvm:kvm_entry";
-const char *kvm_exit_trace = "kvm:kvm_exit";
-
-const char *kvm_events_tp[] = {
- "kvm:kvm_entry",
- "kvm:kvm_exit",
- NULL,
-};
-
-static void event_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- key->info = 0;
- key->key = evsel__intval(evsel, sample, kvm_exit_reason);
- key->exit_reasons = arm64_exit_reasons;
-
- /*
- * TRAP exceptions carry exception class info in esr_ec field
- * and, hence, we need to use a different exit_reasons table to
- * properly decode event's est_ec.
- */
- if (key->key == ARM_EXCEPTION_TRAP) {
- key->key = evsel__intval(evsel, sample, kvm_trap_exit_reason);
- key->exit_reasons = arm64_trap_exit_reasons;
- }
-}
-
-static bool event_begin(struct evsel *evsel,
- struct perf_sample *sample __maybe_unused,
- struct event_key *key __maybe_unused)
-{
- return evsel__name_is(evsel, kvm_entry_trace);
-}
-
-static bool event_end(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- if (evsel__name_is(evsel, kvm_exit_trace)) {
- event_get_key(evsel, sample, key);
- return true;
- }
- return false;
-}
-
-static struct kvm_events_ops exit_events = {
- .is_begin_event = event_begin,
- .is_end_event = event_end,
- .decode_key = exit_event_decode_key,
- .name = "VM-EXIT"
-};
-
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
- {
- .name = "vmexit",
- .ops = &exit_events,
- },
- { NULL, NULL },
-};
-
-const char * const kvm_skip_events[] = {
- NULL,
-};
-
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
-{
- kvm->exit_reasons_isa = "arm64";
- return 0;
-}
# SPDX-License-Identifier: GPL-2.0
PERF_HAVE_JITDUMP := 1
-HAVE_KVM_STAT_SUPPORT := 1
perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-#include <errno.h>
-#include <memory.h>
-#include "util/kvm-stat.h"
-#include "util/parse-events.h"
-#include "util/debug.h"
-#include "util/evsel.h"
-#include "util/evlist.h"
-#include "util/pmus.h"
-
-#define LOONGARCH_EXCEPTION_INT 0
-#define LOONGARCH_EXCEPTION_PIL 1
-#define LOONGARCH_EXCEPTION_PIS 2
-#define LOONGARCH_EXCEPTION_PIF 3
-#define LOONGARCH_EXCEPTION_PME 4
-#define LOONGARCH_EXCEPTION_FPD 15
-#define LOONGARCH_EXCEPTION_SXD 16
-#define LOONGARCH_EXCEPTION_ASXD 17
-#define LOONGARCH_EXCEPTION_GSPR 22
-#define LOONGARCH_EXCEPTION_CPUCFG 100
-#define LOONGARCH_EXCEPTION_CSR 101
-#define LOONGARCH_EXCEPTION_IOCSR 102
-#define LOONGARCH_EXCEPTION_IDLE 103
-#define LOONGARCH_EXCEPTION_OTHERS 104
-#define LOONGARCH_EXCEPTION_HVC 23
-
-#define loongarch_exception_type \
- {LOONGARCH_EXCEPTION_INT, "Interrupt" }, \
- {LOONGARCH_EXCEPTION_PIL, "Mem Read" }, \
- {LOONGARCH_EXCEPTION_PIS, "Mem Store" }, \
- {LOONGARCH_EXCEPTION_PIF, "Inst Fetch" }, \
- {LOONGARCH_EXCEPTION_PME, "Mem Modify" }, \
- {LOONGARCH_EXCEPTION_FPD, "FPU" }, \
- {LOONGARCH_EXCEPTION_SXD, "LSX" }, \
- {LOONGARCH_EXCEPTION_ASXD, "LASX" }, \
- {LOONGARCH_EXCEPTION_GSPR, "Privilege Error" }, \
- {LOONGARCH_EXCEPTION_HVC, "Hypercall" }, \
- {LOONGARCH_EXCEPTION_CPUCFG, "CPUCFG" }, \
- {LOONGARCH_EXCEPTION_CSR, "CSR" }, \
- {LOONGARCH_EXCEPTION_IOCSR, "IOCSR" }, \
- {LOONGARCH_EXCEPTION_IDLE, "Idle" }, \
- {LOONGARCH_EXCEPTION_OTHERS, "Others" }
-
-define_exit_reasons_table(loongarch_exit_reasons, loongarch_exception_type);
-
-const char *vcpu_id_str = "vcpu_id";
-const char *kvm_exit_reason = "reason";
-const char *kvm_entry_trace = "kvm:kvm_enter";
-const char *kvm_reenter_trace = "kvm:kvm_reenter";
-const char *kvm_exit_trace = "kvm:kvm_exit";
-const char *kvm_events_tp[] = {
- "kvm:kvm_enter",
- "kvm:kvm_reenter",
- "kvm:kvm_exit",
- "kvm:kvm_exit_gspr",
- NULL,
-};
-
-static bool event_begin(struct evsel *evsel,
- struct perf_sample *sample, struct event_key *key)
-{
- return exit_event_begin(evsel, sample, key);
-}
-
-static bool event_end(struct evsel *evsel,
- struct perf_sample *sample __maybe_unused,
- struct event_key *key __maybe_unused)
-{
- /*
- * LoongArch kvm is different with other architectures
- *
- * There is kvm:kvm_reenter or kvm:kvm_enter event adjacent with
- * kvm:kvm_exit event.
- * kvm:kvm_enter means returning to vmm and then to guest
- * kvm:kvm_reenter means returning to guest immediately
- */
- return evsel__name_is(evsel, kvm_entry_trace) || evsel__name_is(evsel, kvm_reenter_trace);
-}
-
-static void event_gspr_get_key(struct evsel *evsel,
- struct perf_sample *sample, struct event_key *key)
-{
- unsigned int insn;
-
- key->key = LOONGARCH_EXCEPTION_OTHERS;
- insn = evsel__intval(evsel, sample, "inst_word");
-
- switch (insn >> 24) {
- case 0:
- /* CPUCFG inst trap */
- if ((insn >> 10) == 0x1b)
- key->key = LOONGARCH_EXCEPTION_CPUCFG;
- break;
- case 4:
- /* CSR inst trap */
- key->key = LOONGARCH_EXCEPTION_CSR;
- break;
- case 6:
- /* IOCSR inst trap */
- if ((insn >> 15) == 0xc90)
- key->key = LOONGARCH_EXCEPTION_IOCSR;
- else if ((insn >> 15) == 0xc91)
- /* Idle inst trap */
- key->key = LOONGARCH_EXCEPTION_IDLE;
- break;
- default:
- key->key = LOONGARCH_EXCEPTION_OTHERS;
- break;
- }
-}
-
-static struct child_event_ops child_events[] = {
- { .name = "kvm:kvm_exit_gspr", .get_key = event_gspr_get_key },
- { NULL, NULL },
-};
-
-static struct kvm_events_ops exit_events = {
- .is_begin_event = event_begin,
- .is_end_event = event_end,
- .child_ops = child_events,
- .decode_key = exit_event_decode_key,
- .name = "VM-EXIT"
-};
-
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
- { .name = "vmexit", .ops = &exit_events, },
- { NULL, NULL },
-};
-
-const char * const kvm_skip_events[] = {
- NULL,
-};
-
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
-{
- kvm->exit_reasons_isa = "loongarch64";
- kvm->exit_reasons = loongarch_exit_reasons;
- return 0;
-}
# SPDX-License-Identifier: GPL-2.0
-HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_JITDUMP := 1
perf-util-y += header.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-util-y += perf_regs.o
perf-util-y += mem-events.o
perf-util-y += pmu.o
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH_PERF_BOOK3S_HV_HCALLS_H
-#define ARCH_PERF_BOOK3S_HV_HCALLS_H
-
-/*
- * PowerPC HCALL codes : hcall code to name mapping
- */
-#define kvm_trace_symbol_hcall \
- {0x4, "H_REMOVE"}, \
- {0x8, "H_ENTER"}, \
- {0xc, "H_READ"}, \
- {0x10, "H_CLEAR_MOD"}, \
- {0x14, "H_CLEAR_REF"}, \
- {0x18, "H_PROTECT"}, \
- {0x1c, "H_GET_TCE"}, \
- {0x20, "H_PUT_TCE"}, \
- {0x24, "H_SET_SPRG0"}, \
- {0x28, "H_SET_DABR"}, \
- {0x2c, "H_PAGE_INIT"}, \
- {0x30, "H_SET_ASR"}, \
- {0x34, "H_ASR_ON"}, \
- {0x38, "H_ASR_OFF"}, \
- {0x3c, "H_LOGICAL_CI_LOAD"}, \
- {0x40, "H_LOGICAL_CI_STORE"}, \
- {0x44, "H_LOGICAL_CACHE_LOAD"}, \
- {0x48, "H_LOGICAL_CACHE_STORE"}, \
- {0x4c, "H_LOGICAL_ICBI"}, \
- {0x50, "H_LOGICAL_DCBF"}, \
- {0x54, "H_GET_TERM_CHAR"}, \
- {0x58, "H_PUT_TERM_CHAR"}, \
- {0x5c, "H_REAL_TO_LOGICAL"}, \
- {0x60, "H_HYPERVISOR_DATA"}, \
- {0x64, "H_EOI"}, \
- {0x68, "H_CPPR"}, \
- {0x6c, "H_IPI"}, \
- {0x70, "H_IPOLL"}, \
- {0x74, "H_XIRR"}, \
- {0x78, "H_MIGRATE_DMA"}, \
- {0x7c, "H_PERFMON"}, \
- {0xdc, "H_REGISTER_VPA"}, \
- {0xe0, "H_CEDE"}, \
- {0xe4, "H_CONFER"}, \
- {0xe8, "H_PROD"}, \
- {0xec, "H_GET_PPP"}, \
- {0xf0, "H_SET_PPP"}, \
- {0xf4, "H_PURR"}, \
- {0xf8, "H_PIC"}, \
- {0xfc, "H_REG_CRQ"}, \
- {0x100, "H_FREE_CRQ"}, \
- {0x104, "H_VIO_SIGNAL"}, \
- {0x108, "H_SEND_CRQ"}, \
- {0x110, "H_COPY_RDMA"}, \
- {0x114, "H_REGISTER_LOGICAL_LAN"}, \
- {0x118, "H_FREE_LOGICAL_LAN"}, \
- {0x11c, "H_ADD_LOGICAL_LAN_BUFFER"}, \
- {0x120, "H_SEND_LOGICAL_LAN"}, \
- {0x124, "H_BULK_REMOVE"}, \
- {0x130, "H_MULTICAST_CTRL"}, \
- {0x134, "H_SET_XDABR"}, \
- {0x138, "H_STUFF_TCE"}, \
- {0x13c, "H_PUT_TCE_INDIRECT"}, \
- {0x14c, "H_CHANGE_LOGICAL_LAN_MAC"}, \
- {0x150, "H_VTERM_PARTNER_INFO"}, \
- {0x154, "H_REGISTER_VTERM"}, \
- {0x158, "H_FREE_VTERM"}, \
- {0x15c, "H_RESET_EVENTS"}, \
- {0x160, "H_ALLOC_RESOURCE"}, \
- {0x164, "H_FREE_RESOURCE"}, \
- {0x168, "H_MODIFY_QP"}, \
- {0x16c, "H_QUERY_QP"}, \
- {0x170, "H_REREGISTER_PMR"}, \
- {0x174, "H_REGISTER_SMR"}, \
- {0x178, "H_QUERY_MR"}, \
- {0x17c, "H_QUERY_MW"}, \
- {0x180, "H_QUERY_HCA"}, \
- {0x184, "H_QUERY_PORT"}, \
- {0x188, "H_MODIFY_PORT"}, \
- {0x18c, "H_DEFINE_AQP1"}, \
- {0x190, "H_GET_TRACE_BUFFER"}, \
- {0x194, "H_DEFINE_AQP0"}, \
- {0x198, "H_RESIZE_MR"}, \
- {0x19c, "H_ATTACH_MCQP"}, \
- {0x1a0, "H_DETACH_MCQP"}, \
- {0x1a4, "H_CREATE_RPT"}, \
- {0x1a8, "H_REMOVE_RPT"}, \
- {0x1ac, "H_REGISTER_RPAGES"}, \
- {0x1b0, "H_DISABLE_AND_GET"}, \
- {0x1b4, "H_ERROR_DATA"}, \
- {0x1b8, "H_GET_HCA_INFO"}, \
- {0x1bc, "H_GET_PERF_COUNT"}, \
- {0x1c0, "H_MANAGE_TRACE"}, \
- {0x1d4, "H_FREE_LOGICAL_LAN_BUFFER"}, \
- {0x1d8, "H_POLL_PENDING"}, \
- {0x1e4, "H_QUERY_INT_STATE"}, \
- {0x244, "H_ILLAN_ATTRIBUTES"}, \
- {0x250, "H_MODIFY_HEA_QP"}, \
- {0x254, "H_QUERY_HEA_QP"}, \
- {0x258, "H_QUERY_HEA"}, \
- {0x25c, "H_QUERY_HEA_PORT"}, \
- {0x260, "H_MODIFY_HEA_PORT"}, \
- {0x264, "H_REG_BCMC"}, \
- {0x268, "H_DEREG_BCMC"}, \
- {0x26c, "H_REGISTER_HEA_RPAGES"}, \
- {0x270, "H_DISABLE_AND_GET_HEA"}, \
- {0x274, "H_GET_HEA_INFO"}, \
- {0x278, "H_ALLOC_HEA_RESOURCE"}, \
- {0x284, "H_ADD_CONN"}, \
- {0x288, "H_DEL_CONN"}, \
- {0x298, "H_JOIN"}, \
- {0x2a4, "H_VASI_STATE"}, \
- {0x2b0, "H_ENABLE_CRQ"}, \
- {0x2b8, "H_GET_EM_PARMS"}, \
- {0x2d0, "H_SET_MPP"}, \
- {0x2d4, "H_GET_MPP"}, \
- {0x2ec, "H_HOME_NODE_ASSOCIATIVITY"}, \
- {0x2f4, "H_BEST_ENERGY"}, \
- {0x2fc, "H_XIRR_X"}, \
- {0x300, "H_RANDOM"}, \
- {0x304, "H_COP"}, \
- {0x314, "H_GET_MPP_X"}, \
- {0x31c, "H_SET_MODE"}, \
- {0xf000, "H_RTAS"} \
-
-#endif
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH_PERF_BOOK3S_HV_EXITS_H
-#define ARCH_PERF_BOOK3S_HV_EXITS_H
-
-/*
- * PowerPC Interrupt vectors : exit code to name mapping
- */
-
-#define kvm_trace_symbol_exit \
- {0x0, "RETURN_TO_HOST"}, \
- {0x100, "SYSTEM_RESET"}, \
- {0x200, "MACHINE_CHECK"}, \
- {0x300, "DATA_STORAGE"}, \
- {0x380, "DATA_SEGMENT"}, \
- {0x400, "INST_STORAGE"}, \
- {0x480, "INST_SEGMENT"}, \
- {0x500, "EXTERNAL"}, \
- {0x502, "EXTERNAL_HV"}, \
- {0x600, "ALIGNMENT"}, \
- {0x700, "PROGRAM"}, \
- {0x800, "FP_UNAVAIL"}, \
- {0x900, "DECREMENTER"}, \
- {0x980, "HV_DECREMENTER"}, \
- {0xc00, "SYSCALL"}, \
- {0xd00, "TRACE"}, \
- {0xe00, "H_DATA_STORAGE"}, \
- {0xe20, "H_INST_STORAGE"}, \
- {0xe40, "H_EMUL_ASSIST"}, \
- {0xf00, "PERFMON"}, \
- {0xf20, "ALTIVEC"}, \
- {0xf40, "VSX"}
-
-#endif
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-#include <errno.h>
-#include "util/kvm-stat.h"
-#include "util/parse-events.h"
-#include "util/debug.h"
-#include "util/evsel.h"
-#include "util/evlist.h"
-#include "util/pmus.h"
-
-#include "book3s_hv_exits.h"
-#include "book3s_hcalls.h"
-#include <subcmd/parse-options.h>
-
-#define NR_TPS 4
-
-const char *vcpu_id_str = "vcpu_id";
-const char *kvm_entry_trace = "kvm_hv:kvm_guest_enter";
-const char *kvm_exit_trace = "kvm_hv:kvm_guest_exit";
-
-define_exit_reasons_table(hv_exit_reasons, kvm_trace_symbol_exit);
-define_exit_reasons_table(hcall_reasons, kvm_trace_symbol_hcall);
-
-/* Tracepoints specific to ppc_book3s_hv */
-const char *ppc_book3s_hv_kvm_tp[] = {
- "kvm_hv:kvm_guest_enter",
- "kvm_hv:kvm_guest_exit",
- "kvm_hv:kvm_hcall_enter",
- "kvm_hv:kvm_hcall_exit",
- NULL,
-};
-
-/* 1 extra placeholder for NULL */
-const char *kvm_events_tp[NR_TPS + 1];
-const char *kvm_exit_reason;
-
-static void hcall_event_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- key->info = 0;
- key->key = evsel__intval(evsel, sample, "req");
-}
-
-static const char *get_hcall_exit_reason(u64 exit_code)
-{
- struct exit_reasons_table *tbl = hcall_reasons;
-
- while (tbl->reason != NULL) {
- if (tbl->exit_code == exit_code)
- return tbl->reason;
- tbl++;
- }
-
- pr_debug("Unknown hcall code: %lld\n",
- (unsigned long long)exit_code);
- return "UNKNOWN";
-}
-
-static bool hcall_event_end(struct evsel *evsel,
- struct perf_sample *sample __maybe_unused,
- struct event_key *key __maybe_unused)
-{
- return (evsel__name_is(evsel, kvm_events_tp[3]));
-}
-
-static bool hcall_event_begin(struct evsel *evsel,
- struct perf_sample *sample, struct event_key *key)
-{
- if (evsel__name_is(evsel, kvm_events_tp[2])) {
- hcall_event_get_key(evsel, sample, key);
- return true;
- }
-
- return false;
-}
-static void hcall_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
- struct event_key *key,
- char *decode)
-{
- const char *hcall_reason = get_hcall_exit_reason(key->key);
-
- scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", hcall_reason);
-}
-
-static struct kvm_events_ops hcall_events = {
- .is_begin_event = hcall_event_begin,
- .is_end_event = hcall_event_end,
- .decode_key = hcall_event_decode_key,
- .name = "HCALL-EVENT",
-};
-
-static struct kvm_events_ops exit_events = {
- .is_begin_event = exit_event_begin,
- .is_end_event = exit_event_end,
- .decode_key = exit_event_decode_key,
- .name = "VM-EXIT"
-};
-
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
- { .name = "vmexit", .ops = &exit_events },
- { .name = "hcall", .ops = &hcall_events },
- { NULL, NULL },
-};
-
-const char * const kvm_skip_events[] = {
- NULL,
-};
-
-
-static int is_tracepoint_available(const char *str, struct evlist *evlist)
-{
- struct parse_events_error err;
- int ret;
-
- parse_events_error__init(&err);
- ret = parse_events(evlist, str, &err);
- if (ret)
- parse_events_error__print(&err, "tracepoint");
- parse_events_error__exit(&err);
- return ret;
-}
-
-static int ppc__setup_book3s_hv(struct perf_kvm_stat *kvm,
- struct evlist *evlist)
-{
- const char **events_ptr;
- int i, nr_tp = 0, err = -1;
-
- /* Check for book3s_hv tracepoints */
- for (events_ptr = ppc_book3s_hv_kvm_tp; *events_ptr; events_ptr++) {
- err = is_tracepoint_available(*events_ptr, evlist);
- if (err)
- return -1;
- nr_tp++;
- }
-
- for (i = 0; i < nr_tp; i++)
- kvm_events_tp[i] = ppc_book3s_hv_kvm_tp[i];
-
- kvm_events_tp[i] = NULL;
- kvm_exit_reason = "trap";
- kvm->exit_reasons = hv_exit_reasons;
- kvm->exit_reasons_isa = "HV";
-
- return 0;
-}
-
-/* Wrapper to setup kvm tracepoints */
-static int ppc__setup_kvm_tp(struct perf_kvm_stat *kvm)
-{
- struct evlist *evlist = evlist__new();
-
- if (evlist == NULL)
- return -ENOMEM;
-
- /* Right now, only supported on book3s_hv */
- return ppc__setup_book3s_hv(kvm, evlist);
-}
-
-int setup_kvm_events_tp(struct perf_kvm_stat *kvm)
-{
- return ppc__setup_kvm_tp(kvm);
-}
-
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
-{
- int ret;
-
- ret = ppc__setup_kvm_tp(kvm);
- if (ret) {
- kvm->exit_reasons = NULL;
- kvm->exit_reasons_isa = NULL;
- }
-
- return ret;
-}
-
-/*
- * In case of powerpc architecture, pmu registers are programmable
- * by guest kernel. So monitoring guest via host may not provide
- * valid samples with default 'cycles' event. It is better to use
- * 'trace_imc/trace_cycles' event for guest profiling, since it
- * can track the guest instruction pointer in the trace-record.
- *
- * Function to parse the arguments and return appropriate values.
- */
-int kvm_add_default_arch_event(int *argc, const char **argv)
-{
- const char **tmp;
- bool event = false;
- int i, j = *argc;
-
- const struct option event_options[] = {
- OPT_BOOLEAN('e', "event", &event, NULL),
- OPT_END()
- };
-
- tmp = calloc(j + 1, sizeof(char *));
- if (!tmp)
- return -EINVAL;
-
- for (i = 0; i < j; i++)
- tmp[i] = argv[i];
-
- parse_options(j, tmp, event_options, NULL, PARSE_OPT_KEEP_UNKNOWN);
- if (!event) {
- if (perf_pmus__have_event("trace_imc", "trace_cycles")) {
- argv[j++] = strdup("-e");
- argv[j++] = strdup("trace_imc/trace_cycles/");
- *argc += 2;
- } else {
- free(tmp);
- return -EINVAL;
- }
- }
-
- free(tmp);
- return 0;
-}
# SPDX-License-Identifier: GPL-2.0
PERF_HAVE_JITDUMP := 1
-HAVE_KVM_STAT_SUPPORT := 1
perf-util-y += perf_regs.o
perf-util-y += header.o
-
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Arch specific functions for perf kvm stat.
- *
- * Copyright 2024 Beijing ESWIN Computing Technology Co., Ltd.
- *
- */
-#include <errno.h>
-#include <memory.h>
-#include "../../../util/evsel.h"
-#include "../../../util/kvm-stat.h"
-#include "riscv_trap_types.h"
-#include "debug.h"
-
-define_exit_reasons_table(riscv_exit_reasons, kvm_riscv_trap_class);
-
-const char *vcpu_id_str = "id";
-const char *kvm_exit_reason = "scause";
-const char *kvm_entry_trace = "kvm:kvm_entry";
-const char *kvm_exit_trace = "kvm:kvm_exit";
-
-const char *kvm_events_tp[] = {
- "kvm:kvm_entry",
- "kvm:kvm_exit",
- NULL,
-};
-
-static void event_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- key->info = 0;
- key->key = evsel__intval(evsel, sample, kvm_exit_reason) & ~CAUSE_IRQ_FLAG;
- key->exit_reasons = riscv_exit_reasons;
-}
-
-static bool event_begin(struct evsel *evsel,
- struct perf_sample *sample __maybe_unused,
- struct event_key *key __maybe_unused)
-{
- return evsel__name_is(evsel, kvm_entry_trace);
-}
-
-static bool event_end(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- if (evsel__name_is(evsel, kvm_exit_trace)) {
- event_get_key(evsel, sample, key);
- return true;
- }
- return false;
-}
-
-static struct kvm_events_ops exit_events = {
- .is_begin_event = event_begin,
- .is_end_event = event_end,
- .decode_key = exit_event_decode_key,
- .name = "VM-EXIT"
-};
-
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
- {
- .name = "vmexit",
- .ops = &exit_events,
- },
- { NULL, NULL },
-};
-
-const char * const kvm_skip_events[] = {
- NULL,
-};
-
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
-{
- kvm->exit_reasons_isa = "riscv64";
- return 0;
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-#ifndef ARCH_PERF_RISCV_TRAP_TYPES_H
-#define ARCH_PERF_RISCV_TRAP_TYPES_H
-
-/* Exception cause high bit - is an interrupt if set */
-#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
-
-/* Interrupt causes (minus the high bit) */
-#define IRQ_S_SOFT 1
-#define IRQ_VS_SOFT 2
-#define IRQ_M_SOFT 3
-#define IRQ_S_TIMER 5
-#define IRQ_VS_TIMER 6
-#define IRQ_M_TIMER 7
-#define IRQ_S_EXT 9
-#define IRQ_VS_EXT 10
-#define IRQ_M_EXT 11
-#define IRQ_S_GEXT 12
-#define IRQ_PMU_OVF 13
-
-/* Exception causes */
-#define EXC_INST_MISALIGNED 0
-#define EXC_INST_ACCESS 1
-#define EXC_INST_ILLEGAL 2
-#define EXC_BREAKPOINT 3
-#define EXC_LOAD_MISALIGNED 4
-#define EXC_LOAD_ACCESS 5
-#define EXC_STORE_MISALIGNED 6
-#define EXC_STORE_ACCESS 7
-#define EXC_SYSCALL 8
-#define EXC_HYPERVISOR_SYSCALL 9
-#define EXC_SUPERVISOR_SYSCALL 10
-#define EXC_INST_PAGE_FAULT 12
-#define EXC_LOAD_PAGE_FAULT 13
-#define EXC_STORE_PAGE_FAULT 15
-#define EXC_INST_GUEST_PAGE_FAULT 20
-#define EXC_LOAD_GUEST_PAGE_FAULT 21
-#define EXC_VIRTUAL_INST_FAULT 22
-#define EXC_STORE_GUEST_PAGE_FAULT 23
-
-#define TRAP(x) { x, #x }
-
-#define kvm_riscv_trap_class \
- TRAP(IRQ_S_SOFT), TRAP(IRQ_VS_SOFT), TRAP(IRQ_M_SOFT), \
- TRAP(IRQ_S_TIMER), TRAP(IRQ_VS_TIMER), TRAP(IRQ_M_TIMER), \
- TRAP(IRQ_S_EXT), TRAP(IRQ_VS_EXT), TRAP(IRQ_M_EXT), \
- TRAP(IRQ_S_GEXT), TRAP(IRQ_PMU_OVF), \
- TRAP(EXC_INST_MISALIGNED), TRAP(EXC_INST_ACCESS), TRAP(EXC_INST_ILLEGAL), \
- TRAP(EXC_BREAKPOINT), TRAP(EXC_LOAD_MISALIGNED), TRAP(EXC_LOAD_ACCESS), \
- TRAP(EXC_STORE_MISALIGNED), TRAP(EXC_STORE_ACCESS), TRAP(EXC_SYSCALL), \
- TRAP(EXC_HYPERVISOR_SYSCALL), TRAP(EXC_SUPERVISOR_SYSCALL), \
- TRAP(EXC_INST_PAGE_FAULT), TRAP(EXC_LOAD_PAGE_FAULT), \
- TRAP(EXC_STORE_PAGE_FAULT), TRAP(EXC_INST_GUEST_PAGE_FAULT), \
- TRAP(EXC_LOAD_GUEST_PAGE_FAULT), TRAP(EXC_VIRTUAL_INST_FAULT), \
- TRAP(EXC_STORE_GUEST_PAGE_FAULT)
-
-#endif /* ARCH_PERF_RISCV_TRAP_TYPES_H */
# SPDX-License-Identifier: GPL-2.0-only
-HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_JITDUMP := 1
perf-util-y += header.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-util-y += perf_regs.o
perf-util-y += machine.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Arch specific functions for perf kvm stat.
- *
- * Copyright 2014 IBM Corp.
- * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- */
-
-#include <errno.h>
-#include <string.h>
-#include "../../util/kvm-stat.h"
-#include "../../util/evsel.h"
-#include <asm/sie.h>
-
-define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
-define_exit_reasons_table(sie_icpt_insn_codes, icpt_insn_codes);
-define_exit_reasons_table(sie_sigp_order_codes, sigp_order_codes);
-define_exit_reasons_table(sie_diagnose_codes, diagnose_codes);
-define_exit_reasons_table(sie_icpt_prog_codes, icpt_prog_codes);
-
-const char *vcpu_id_str = "id";
-const char *kvm_exit_reason = "icptcode";
-const char *kvm_entry_trace = "kvm:kvm_s390_sie_enter";
-const char *kvm_exit_trace = "kvm:kvm_s390_sie_exit";
-
-static void event_icpt_insn_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- unsigned long insn;
-
- insn = evsel__intval(evsel, sample, "instruction");
- key->key = icpt_insn_decoder(insn);
- key->exit_reasons = sie_icpt_insn_codes;
-}
-
-static void event_sigp_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- key->key = evsel__intval(evsel, sample, "order_code");
- key->exit_reasons = sie_sigp_order_codes;
-}
-
-static void event_diag_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- key->key = evsel__intval(evsel, sample, "code");
- key->exit_reasons = sie_diagnose_codes;
-}
-
-static void event_icpt_prog_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- key->key = evsel__intval(evsel, sample, "code");
- key->exit_reasons = sie_icpt_prog_codes;
-}
-
-static struct child_event_ops child_events[] = {
- { .name = "kvm:kvm_s390_intercept_instruction",
- .get_key = event_icpt_insn_get_key },
- { .name = "kvm:kvm_s390_handle_sigp",
- .get_key = event_sigp_get_key },
- { .name = "kvm:kvm_s390_handle_diag",
- .get_key = event_diag_get_key },
- { .name = "kvm:kvm_s390_intercept_prog",
- .get_key = event_icpt_prog_get_key },
- { NULL, NULL },
-};
-
-static struct kvm_events_ops exit_events = {
- .is_begin_event = exit_event_begin,
- .is_end_event = exit_event_end,
- .child_ops = child_events,
- .decode_key = exit_event_decode_key,
- .name = "VM-EXIT"
-};
-
-const char *kvm_events_tp[] = {
- "kvm:kvm_s390_sie_enter",
- "kvm:kvm_s390_sie_exit",
- "kvm:kvm_s390_intercept_instruction",
- "kvm:kvm_s390_handle_sigp",
- "kvm:kvm_s390_handle_diag",
- "kvm:kvm_s390_intercept_prog",
- NULL,
-};
-
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
- { .name = "vmexit", .ops = &exit_events },
- { NULL, NULL },
-};
-
-const char * const kvm_skip_events[] = {
- "Wait state",
- NULL,
-};
-
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
-{
- if (strstr(cpuid, "IBM")) {
- kvm->exit_reasons = sie_exit_reasons;
- kvm->exit_reasons_isa = "SIE";
- } else
- return -ENOTSUP;
-
- return 0;
-}
# SPDX-License-Identifier: GPL-2.0
-HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_JITDUMP := 1
perf-util-y += header.o
perf-util-y += tsc.o
perf-util-y += pmu.o
-perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-util-y += perf_regs.o
perf-util-y += topdown.o
perf-util-y += machine.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-#include <errno.h>
-#include <string.h>
-#include "../../../util/kvm-stat.h"
-#include "../../../util/evsel.h"
-#include "../../../util/env.h"
-#include <asm/svm.h>
-#include <asm/vmx.h>
-#include <asm/kvm.h>
-#include <subcmd/parse-options.h>
-
-define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
-define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
-
-static struct kvm_events_ops exit_events = {
- .is_begin_event = exit_event_begin,
- .is_end_event = exit_event_end,
- .decode_key = exit_event_decode_key,
- .name = "VM-EXIT"
-};
-
-const char *vcpu_id_str = "vcpu_id";
-const char *kvm_exit_reason = "exit_reason";
-const char *kvm_entry_trace = "kvm:kvm_entry";
-const char *kvm_exit_trace = "kvm:kvm_exit";
-
-/*
- * For the mmio events, we treat:
- * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
- * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
- */
-static void mmio_event_get_key(struct evsel *evsel, struct perf_sample *sample,
- struct event_key *key)
-{
- key->key = evsel__intval(evsel, sample, "gpa");
- key->info = evsel__intval(evsel, sample, "type");
-}
-
-#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
-#define KVM_TRACE_MMIO_READ 1
-#define KVM_TRACE_MMIO_WRITE 2
-
-static bool mmio_event_begin(struct evsel *evsel,
- struct perf_sample *sample, struct event_key *key)
-{
- /* MMIO read begin event in kernel. */
- if (kvm_exit_event(evsel))
- return true;
-
- /* MMIO write begin event in kernel. */
- if (evsel__name_is(evsel, "kvm:kvm_mmio") &&
- evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
- mmio_event_get_key(evsel, sample, key);
- return true;
- }
-
- return false;
-}
-
-static bool mmio_event_end(struct evsel *evsel, struct perf_sample *sample,
- struct event_key *key)
-{
- /* MMIO write end event in kernel. */
- if (kvm_entry_event(evsel))
- return true;
-
- /* MMIO read end event in kernel.*/
- if (evsel__name_is(evsel, "kvm:kvm_mmio") &&
- evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
- mmio_event_get_key(evsel, sample, key);
- return true;
- }
-
- return false;
-}
-
-static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
- struct event_key *key,
- char *decode)
-{
- scnprintf(decode, KVM_EVENT_NAME_LEN, "%#lx:%s",
- (unsigned long)key->key,
- key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
-}
-
-static struct kvm_events_ops mmio_events = {
- .is_begin_event = mmio_event_begin,
- .is_end_event = mmio_event_end,
- .decode_key = mmio_event_decode_key,
- .name = "MMIO Access"
-};
-
- /* The time of emulation pio access is from kvm_pio to kvm_entry. */
-static void ioport_event_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- key->key = evsel__intval(evsel, sample, "port");
- key->info = evsel__intval(evsel, sample, "rw");
-}
-
-static bool ioport_event_begin(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- if (evsel__name_is(evsel, "kvm:kvm_pio")) {
- ioport_event_get_key(evsel, sample, key);
- return true;
- }
-
- return false;
-}
-
-static bool ioport_event_end(struct evsel *evsel,
- struct perf_sample *sample __maybe_unused,
- struct event_key *key __maybe_unused)
-{
- return kvm_entry_event(evsel);
-}
-
-static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
- struct event_key *key,
- char *decode)
-{
- scnprintf(decode, KVM_EVENT_NAME_LEN, "%#llx:%s",
- (unsigned long long)key->key,
- key->info ? "POUT" : "PIN");
-}
-
-static struct kvm_events_ops ioport_events = {
- .is_begin_event = ioport_event_begin,
- .is_end_event = ioport_event_end,
- .decode_key = ioport_event_decode_key,
- .name = "IO Port Access"
-};
-
- /* The time of emulation msr is from kvm_msr to kvm_entry. */
-static void msr_event_get_key(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- key->key = evsel__intval(evsel, sample, "ecx");
- key->info = evsel__intval(evsel, sample, "write");
-}
-
-static bool msr_event_begin(struct evsel *evsel,
- struct perf_sample *sample,
- struct event_key *key)
-{
- if (evsel__name_is(evsel, "kvm:kvm_msr")) {
- msr_event_get_key(evsel, sample, key);
- return true;
- }
-
- return false;
-}
-
-static bool msr_event_end(struct evsel *evsel,
- struct perf_sample *sample __maybe_unused,
- struct event_key *key __maybe_unused)
-{
- return kvm_entry_event(evsel);
-}
-
-static void msr_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
- struct event_key *key,
- char *decode)
-{
- scnprintf(decode, KVM_EVENT_NAME_LEN, "%#llx:%s",
- (unsigned long long)key->key,
- key->info ? "W" : "R");
-}
-
-static struct kvm_events_ops msr_events = {
- .is_begin_event = msr_event_begin,
- .is_end_event = msr_event_end,
- .decode_key = msr_event_decode_key,
- .name = "MSR Access"
-};
-
-const char *kvm_events_tp[] = {
- "kvm:kvm_entry",
- "kvm:kvm_exit",
- "kvm:kvm_mmio",
- "kvm:kvm_pio",
- "kvm:kvm_msr",
- NULL,
-};
-
-struct kvm_reg_events_ops kvm_reg_events_ops[] = {
- { .name = "vmexit", .ops = &exit_events },
- { .name = "mmio", .ops = &mmio_events },
- { .name = "ioport", .ops = &ioport_events },
- { .name = "msr", .ops = &msr_events },
- { NULL, NULL },
-};
-
-const char * const kvm_skip_events[] = {
- "HLT",
- NULL,
-};
-
-int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
-{
- if (strstr(cpuid, "Intel")) {
- kvm->exit_reasons = vmx_exit_reasons;
- kvm->exit_reasons_isa = "VMX";
- } else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) {
- kvm->exit_reasons = svm_exit_reasons;
- kvm->exit_reasons_isa = "SVM";
- } else
- return -ENOTSUP;
-
- return 0;
-}
-
-/*
- * After KVM supports PEBS for guest on Intel platforms
- * (https://lore.kernel.org/all/20220411101946.20262-1-likexu@tencent.com/),
- * host loses the capability to sample guest with PEBS since all PEBS related
- * MSRs are switched to guest value after vm-entry, like IA32_DS_AREA MSR is
- * switched to guest GVA at vm-entry. This would lead to "perf kvm record"
- * fails to sample guest on Intel platforms since "cycles:P" event is used to
- * sample guest by default.
- *
- * So, to avoid this issue explicitly use "cycles" instead of "cycles:P" event
- * by default to sample guest on Intel platforms.
- */
-int kvm_add_default_arch_event(int *argc, const char **argv)
-{
- const char **tmp;
- bool event = false;
- int ret = 0, i, j = *argc;
-
- const struct option event_options[] = {
- OPT_BOOLEAN('e', "event", &event, NULL),
- OPT_BOOLEAN(0, "pfm-events", &event, NULL),
- OPT_END()
- };
-
- if (!x86__is_intel_cpu())
- return 0;
-
- tmp = calloc(j + 1, sizeof(char *));
- if (!tmp)
- return -ENOMEM;
-
- for (i = 0; i < j; i++)
- tmp[i] = argv[i];
-
- parse_options(j, tmp, event_options, NULL, PARSE_OPT_KEEP_UNKNOWN);
- if (!event) {
- argv[j++] = STRDUP_FAIL_EXIT("-e");
- argv[j++] = STRDUP_FAIL_EXIT("cycles");
- *argc += 2;
- }
-
- free(tmp);
- return 0;
-
-EXIT:
- free(tmp);
- return ret;
-}
#include <math.h>
#include <perf/mmap.h>
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#if defined(HAVE_LIBTRACEEVENT)
#define GET_EVENT_KEY(func, field) \
static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
{ \
#endif /* HAVE_SLANG_SUPPORT */
-#endif // defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#endif // defined(HAVE_LIBTRACEEVENT)
static const char *get_filename_for_perf_kvm(void)
{
return filename;
}
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#if defined(HAVE_LIBTRACEEVENT)
static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
{
- struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;
+ const struct kvm_reg_events_ops *events_ops;
- for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) {
+ for (events_ops = kvm_reg_events_ops(); events_ops->name; events_ops++) {
if (!strcmp(events_ops->name, kvm->report_event)) {
kvm->events_ops = events_ops->ops;
return true;
struct perf_sample *sample,
struct event_key *key)
{
- struct child_event_ops *child_ops;
+ const struct child_event_ops *child_ops;
child_ops = kvm->events_ops->child_ops;
{
const char * const *skip_events;
- for (skip_events = kvm_skip_events; *skip_events; skip_events++)
+ for (skip_events = kvm_skip_events(); *skip_events; skip_events++)
if (!strcmp(event, *skip_events))
return true;
return NULL;
}
- vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str);
+ vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str());
thread__set_priv(thread, vcpu_record);
}
return ret;
}
-int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
-{
- return 0;
-}
-
static int
kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
return ret;
}
- for (events_tp = kvm_events_tp; *events_tp; events_tp++)
+ for (events_tp = kvm_events_tp(); *events_tp; events_tp++)
events_tp_size++;
rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
for (j = 0; j < events_tp_size; j++) {
rec_argv[i++] = STRDUP_FAIL_EXIT("-e");
- rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
+ rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp()[j]);
}
rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
if (evlist == NULL)
return NULL;
- for (events_tp = kvm_events_tp; *events_tp; events_tp++) {
+ for (events_tp = kvm_events_tp(); *events_tp; events_tp++) {
tp = strdup(*events_tp);
if (tp == NULL)
perf_stat:
return cmd_stat(argc, argv);
}
-#endif /* HAVE_KVM_STAT_SUPPORT */
-
-int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
- const char **argv __maybe_unused)
-{
- return 0;
-}
+#endif /* HAVE_LIBTRACEEVENT */
static int __cmd_record(const char *file_name, int argc, const char **argv)
{
return __cmd_top(argc, argv);
else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0]))
return __cmd_buildid_list(file_name, argc, argv);
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#if defined(HAVE_LIBTRACEEVENT)
else if (strlen(argv[0]) > 2 && strstarts("stat", argv[0]))
return kvm_cmd_stat(file_name, argc, argv);
#endif
perf-util-y += topdown.o
perf-util-y += iostat.o
perf-util-y += stream.o
-perf-util-y += kvm-stat.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
+perf-util-y += kvm-stat-arch/
perf-util-y += lock-contention.o
perf-util-y += auxtrace.o
perf-util-y += intel-pt-decoder/
--- /dev/null
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-arm64.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-loongarch.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-powerpc.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-riscv.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-s390.o
+perf-util-$(CONFIG_LIBTRACEEVENT) += kvm-stat-x86.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#ifndef ARCH_PERF_ARM64_EXCEPTION_TYPES_H
+#define ARCH_PERF_ARM64_EXCEPTION_TYPES_H
+
+/* Per asm/virt.h */
+#define HVC_STUB_ERR 0xbadca11
+
+/* Per asm/kvm_asm.h */
+#define ARM_EXCEPTION_IRQ 0
+#define ARM_EXCEPTION_EL1_SERROR 1
+#define ARM_EXCEPTION_TRAP 2
+#define ARM_EXCEPTION_IL 3
+/* The hyp-stub will return this for any kvm_call_hyp() call */
+#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
+
+#define kvm_arm_exception_type \
+ {ARM_EXCEPTION_IRQ, "IRQ" }, \
+ {ARM_EXCEPTION_EL1_SERROR, "SERROR" }, \
+ {ARM_EXCEPTION_TRAP, "TRAP" }, \
+ {ARM_EXCEPTION_IL, "ILLEGAL" }, \
+ {ARM_EXCEPTION_HYP_GONE, "HYP_GONE" }
+
+/* Per asm/esr.h */
+#define ESR_ELx_EC_UNKNOWN (0x00)
+#define ESR_ELx_EC_WFx (0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32 (0x03)
+#define ESR_ELx_EC_CP15_64 (0x04)
+#define ESR_ELx_EC_CP14_MR (0x05)
+#define ESR_ELx_EC_CP14_LS (0x06)
+#define ESR_ELx_EC_FP_ASIMD (0x07)
+#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
+#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
+#define ESR_ELx_EC_OTHER (0x0A)
+/* Unallocated EC: 0x0B */
+#define ESR_ELx_EC_CP14_64 (0x0C)
+#define ESR_ELx_EC_BTI (0x0D)
+#define ESR_ELx_EC_ILL (0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32 (0x11)
+#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
+#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64 (0x15)
+#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
+#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
+#define ESR_ELx_EC_SYS64 (0x18)
+#define ESR_ELx_EC_SVE (0x19)
+#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
+/* Unallocated EC: 0x1B */
+#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
+#define ESR_ELx_EC_SME (0x1D)
+/* Unallocated EC: 0x1E */
+#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
+#define ESR_ELx_EC_IABT_LOW (0x20)
+#define ESR_ELx_EC_IABT_CUR (0x21)
+#define ESR_ELx_EC_PC_ALIGN (0x22)
+/* Unallocated EC: 0x23 */
+#define ESR_ELx_EC_DABT_LOW (0x24)
+#define ESR_ELx_EC_DABT_CUR (0x25)
+#define ESR_ELx_EC_SP_ALIGN (0x26)
+#define ESR_ELx_EC_MOPS (0x27)
+#define ESR_ELx_EC_FP_EXC32 (0x28)
+/* Unallocated EC: 0x29 - 0x2B */
+#define ESR_ELx_EC_FP_EXC64 (0x2C)
+#define ESR_ELx_EC_GCS (0x2D)
+/* Unallocated EC: 0x2E */
+#define ESR_ELx_EC_SERROR (0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW (0x30)
+#define ESR_ELx_EC_BREAKPT_CUR (0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
+#define ESR_ELx_EC_WATCHPT_LOW (0x34)
+#define ESR_ELx_EC_WATCHPT_CUR (0x35)
+/* Unallocated EC: 0x36 - 0x37 */
+#define ESR_ELx_EC_BKPT32 (0x38)
+/* Unallocated EC: 0x39 */
+#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
+/* Unallocated EC: 0x3B */
+#define ESR_ELx_EC_BRK64 (0x3C)
+/* Unallocated EC: 0x3D - 0x3F */
+#define ESR_ELx_EC_MAX (0x3F)
+
+#define ECN(x) { ESR_ELx_EC_##x, #x }
+
+#define kvm_arm_exception_class \
+ ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
+ ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(PAC), ECN(CP14_64), \
+ ECN(SVC64), ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(SVE), \
+ ECN(IMP_DEF), ECN(IABT_LOW), ECN(IABT_CUR), \
+ ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
+ ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
+ ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
+ ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
+ ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
+
+#endif /* ARCH_PERF_ARM64_EXCEPTION_TYPES_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_PERF_BOOK3S_HV_HCALLS_H
+#define ARCH_PERF_BOOK3S_HV_HCALLS_H
+
+/*
+ * PowerPC HCALL codes : hcall code to name mapping
+ */
+#define kvm_trace_symbol_hcall \
+ {0x4, "H_REMOVE"}, \
+ {0x8, "H_ENTER"}, \
+ {0xc, "H_READ"}, \
+ {0x10, "H_CLEAR_MOD"}, \
+ {0x14, "H_CLEAR_REF"}, \
+ {0x18, "H_PROTECT"}, \
+ {0x1c, "H_GET_TCE"}, \
+ {0x20, "H_PUT_TCE"}, \
+ {0x24, "H_SET_SPRG0"}, \
+ {0x28, "H_SET_DABR"}, \
+ {0x2c, "H_PAGE_INIT"}, \
+ {0x30, "H_SET_ASR"}, \
+ {0x34, "H_ASR_ON"}, \
+ {0x38, "H_ASR_OFF"}, \
+ {0x3c, "H_LOGICAL_CI_LOAD"}, \
+ {0x40, "H_LOGICAL_CI_STORE"}, \
+ {0x44, "H_LOGICAL_CACHE_LOAD"}, \
+ {0x48, "H_LOGICAL_CACHE_STORE"}, \
+ {0x4c, "H_LOGICAL_ICBI"}, \
+ {0x50, "H_LOGICAL_DCBF"}, \
+ {0x54, "H_GET_TERM_CHAR"}, \
+ {0x58, "H_PUT_TERM_CHAR"}, \
+ {0x5c, "H_REAL_TO_LOGICAL"}, \
+ {0x60, "H_HYPERVISOR_DATA"}, \
+ {0x64, "H_EOI"}, \
+ {0x68, "H_CPPR"}, \
+ {0x6c, "H_IPI"}, \
+ {0x70, "H_IPOLL"}, \
+ {0x74, "H_XIRR"}, \
+ {0x78, "H_MIGRATE_DMA"}, \
+ {0x7c, "H_PERFMON"}, \
+ {0xdc, "H_REGISTER_VPA"}, \
+ {0xe0, "H_CEDE"}, \
+ {0xe4, "H_CONFER"}, \
+ {0xe8, "H_PROD"}, \
+ {0xec, "H_GET_PPP"}, \
+ {0xf0, "H_SET_PPP"}, \
+ {0xf4, "H_PURR"}, \
+ {0xf8, "H_PIC"}, \
+ {0xfc, "H_REG_CRQ"}, \
+ {0x100, "H_FREE_CRQ"}, \
+ {0x104, "H_VIO_SIGNAL"}, \
+ {0x108, "H_SEND_CRQ"}, \
+ {0x110, "H_COPY_RDMA"}, \
+ {0x114, "H_REGISTER_LOGICAL_LAN"}, \
+ {0x118, "H_FREE_LOGICAL_LAN"}, \
+ {0x11c, "H_ADD_LOGICAL_LAN_BUFFER"}, \
+ {0x120, "H_SEND_LOGICAL_LAN"}, \
+ {0x124, "H_BULK_REMOVE"}, \
+ {0x130, "H_MULTICAST_CTRL"}, \
+ {0x134, "H_SET_XDABR"}, \
+ {0x138, "H_STUFF_TCE"}, \
+ {0x13c, "H_PUT_TCE_INDIRECT"}, \
+ {0x14c, "H_CHANGE_LOGICAL_LAN_MAC"}, \
+ {0x150, "H_VTERM_PARTNER_INFO"}, \
+ {0x154, "H_REGISTER_VTERM"}, \
+ {0x158, "H_FREE_VTERM"}, \
+ {0x15c, "H_RESET_EVENTS"}, \
+ {0x160, "H_ALLOC_RESOURCE"}, \
+ {0x164, "H_FREE_RESOURCE"}, \
+ {0x168, "H_MODIFY_QP"}, \
+ {0x16c, "H_QUERY_QP"}, \
+ {0x170, "H_REREGISTER_PMR"}, \
+ {0x174, "H_REGISTER_SMR"}, \
+ {0x178, "H_QUERY_MR"}, \
+ {0x17c, "H_QUERY_MW"}, \
+ {0x180, "H_QUERY_HCA"}, \
+ {0x184, "H_QUERY_PORT"}, \
+ {0x188, "H_MODIFY_PORT"}, \
+ {0x18c, "H_DEFINE_AQP1"}, \
+ {0x190, "H_GET_TRACE_BUFFER"}, \
+ {0x194, "H_DEFINE_AQP0"}, \
+ {0x198, "H_RESIZE_MR"}, \
+ {0x19c, "H_ATTACH_MCQP"}, \
+ {0x1a0, "H_DETACH_MCQP"}, \
+ {0x1a4, "H_CREATE_RPT"}, \
+ {0x1a8, "H_REMOVE_RPT"}, \
+ {0x1ac, "H_REGISTER_RPAGES"}, \
+ {0x1b0, "H_DISABLE_AND_GET"}, \
+ {0x1b4, "H_ERROR_DATA"}, \
+ {0x1b8, "H_GET_HCA_INFO"}, \
+ {0x1bc, "H_GET_PERF_COUNT"}, \
+ {0x1c0, "H_MANAGE_TRACE"}, \
+ {0x1d4, "H_FREE_LOGICAL_LAN_BUFFER"}, \
+ {0x1d8, "H_POLL_PENDING"}, \
+ {0x1e4, "H_QUERY_INT_STATE"}, \
+ {0x244, "H_ILLAN_ATTRIBUTES"}, \
+ {0x250, "H_MODIFY_HEA_QP"}, \
+ {0x254, "H_QUERY_HEA_QP"}, \
+ {0x258, "H_QUERY_HEA"}, \
+ {0x25c, "H_QUERY_HEA_PORT"}, \
+ {0x260, "H_MODIFY_HEA_PORT"}, \
+ {0x264, "H_REG_BCMC"}, \
+ {0x268, "H_DEREG_BCMC"}, \
+ {0x26c, "H_REGISTER_HEA_RPAGES"}, \
+ {0x270, "H_DISABLE_AND_GET_HEA"}, \
+ {0x274, "H_GET_HEA_INFO"}, \
+ {0x278, "H_ALLOC_HEA_RESOURCE"}, \
+ {0x284, "H_ADD_CONN"}, \
+ {0x288, "H_DEL_CONN"}, \
+ {0x298, "H_JOIN"}, \
+ {0x2a4, "H_VASI_STATE"}, \
+ {0x2b0, "H_ENABLE_CRQ"}, \
+ {0x2b8, "H_GET_EM_PARMS"}, \
+ {0x2d0, "H_SET_MPP"}, \
+ {0x2d4, "H_GET_MPP"}, \
+ {0x2ec, "H_HOME_NODE_ASSOCIATIVITY"}, \
+ {0x2f4, "H_BEST_ENERGY"}, \
+ {0x2fc, "H_XIRR_X"}, \
+ {0x300, "H_RANDOM"}, \
+ {0x304, "H_COP"}, \
+ {0x314, "H_GET_MPP_X"}, \
+ {0x31c, "H_SET_MODE"}, \
+ {0xf000, "H_RTAS"} \
+
+#endif
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_PERF_BOOK3S_HV_EXITS_H
+#define ARCH_PERF_BOOK3S_HV_EXITS_H
+
+/*
+ * PowerPC Interrupt vectors : exit code to name mapping
+ */
+
+#define kvm_trace_symbol_exit \
+ {0x0, "RETURN_TO_HOST"}, \
+ {0x100, "SYSTEM_RESET"}, \
+ {0x200, "MACHINE_CHECK"}, \
+ {0x300, "DATA_STORAGE"}, \
+ {0x380, "DATA_SEGMENT"}, \
+ {0x400, "INST_STORAGE"}, \
+ {0x480, "INST_SEGMENT"}, \
+ {0x500, "EXTERNAL"}, \
+ {0x502, "EXTERNAL_HV"}, \
+ {0x600, "ALIGNMENT"}, \
+ {0x700, "PROGRAM"}, \
+ {0x800, "FP_UNAVAIL"}, \
+ {0x900, "DECREMENTER"}, \
+ {0x980, "HV_DECREMENTER"}, \
+ {0xc00, "SYSCALL"}, \
+ {0xd00, "TRACE"}, \
+ {0xe00, "H_DATA_STORAGE"}, \
+ {0xe20, "H_INST_STORAGE"}, \
+ {0xe40, "H_EMUL_ASSIST"}, \
+ {0xf00, "PERFMON"}, \
+ {0xf20, "ALTIVEC"}, \
+ {0xf40, "VSX"}
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <memory.h>
+#include "../debug.h"
+#include "../evsel.h"
+#include "../kvm-stat.h"
+#include "arm64_exception_types.h"
+
+define_exit_reasons_table(arm64_exit_reasons, kvm_arm_exception_type);
+define_exit_reasons_table(arm64_trap_exit_reasons, kvm_arm_exception_class);
+
+static const char *kvm_trap_exit_reason = "esr_ec";
+
+static const char * const __kvm_events_tp[] = {
+ "kvm:kvm_entry",
+ "kvm:kvm_exit",
+ NULL,
+};
+
+static void event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->info = 0;
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason());
+ key->exit_reasons = arm64_exit_reasons;
+
+ /*
+ * TRAP exceptions carry exception class info in esr_ec field
+ * and, hence, we need to use a different exit_reasons table to
+	 * properly decode event's esr_ec.
+ */
+ if (key->key == ARM_EXCEPTION_TRAP) {
+ key->key = evsel__intval(evsel, sample, kvm_trap_exit_reason);
+ key->exit_reasons = arm64_trap_exit_reasons;
+ }
+}
+
+static bool event_begin(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return evsel__name_is(evsel, kvm_entry_trace());
+}
+
+static bool event_end(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (evsel__name_is(evsel, kvm_exit_trace())) {
+ event_get_key(evsel, sample, key);
+ return true;
+ }
+ return false;
+}
+
+static const struct kvm_events_ops exit_events = {
+ .is_begin_event = event_begin,
+ .is_end_event = event_end,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
+ {
+ .name = "vmexit",
+ .ops = &exit_events,
+ },
+ { NULL, NULL },
+};
+
+static const char * const __kvm_skip_events[] = {
+ NULL,
+};
+
+int __cpu_isa_init_arm64(struct perf_kvm_stat *kvm)
+{
+ kvm->exit_reasons_isa = "arm64";
+ return 0;
+}
+
+const char * const *__kvm_events_tp_arm64(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_arm64(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_arm64(void)
+{
+ return __kvm_skip_events;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <memory.h>
+#include "../kvm-stat.h"
+#include "../parse-events.h"
+#include "../debug.h"
+#include "../evsel.h"
+#include "../evlist.h"
+#include "../pmus.h"
+
+#define LOONGARCH_EXCEPTION_INT 0
+#define LOONGARCH_EXCEPTION_PIL 1
+#define LOONGARCH_EXCEPTION_PIS 2
+#define LOONGARCH_EXCEPTION_PIF 3
+#define LOONGARCH_EXCEPTION_PME 4
+#define LOONGARCH_EXCEPTION_FPD 15
+#define LOONGARCH_EXCEPTION_SXD 16
+#define LOONGARCH_EXCEPTION_ASXD 17
+#define LOONGARCH_EXCEPTION_GSPR 22
+#define LOONGARCH_EXCEPTION_CPUCFG 100
+#define LOONGARCH_EXCEPTION_CSR 101
+#define LOONGARCH_EXCEPTION_IOCSR 102
+#define LOONGARCH_EXCEPTION_IDLE 103
+#define LOONGARCH_EXCEPTION_OTHERS 104
+#define LOONGARCH_EXCEPTION_HVC 23
+
+#define loongarch_exception_type \
+ {LOONGARCH_EXCEPTION_INT, "Interrupt" }, \
+ {LOONGARCH_EXCEPTION_PIL, "Mem Read" }, \
+ {LOONGARCH_EXCEPTION_PIS, "Mem Store" }, \
+ {LOONGARCH_EXCEPTION_PIF, "Inst Fetch" }, \
+ {LOONGARCH_EXCEPTION_PME, "Mem Modify" }, \
+ {LOONGARCH_EXCEPTION_FPD, "FPU" }, \
+ {LOONGARCH_EXCEPTION_SXD, "LSX" }, \
+ {LOONGARCH_EXCEPTION_ASXD, "LASX" }, \
+ {LOONGARCH_EXCEPTION_GSPR, "Privilege Error" }, \
+ {LOONGARCH_EXCEPTION_HVC, "Hypercall" }, \
+ {LOONGARCH_EXCEPTION_CPUCFG, "CPUCFG" }, \
+ {LOONGARCH_EXCEPTION_CSR, "CSR" }, \
+ {LOONGARCH_EXCEPTION_IOCSR, "IOCSR" }, \
+ {LOONGARCH_EXCEPTION_IDLE, "Idle" }, \
+ {LOONGARCH_EXCEPTION_OTHERS, "Others" }
+
+define_exit_reasons_table(loongarch_exit_reasons, loongarch_exception_type);
+
+static const char *kvm_reenter_trace = "kvm:kvm_reenter";
+static const char * const __kvm_events_tp[] = {
+ "kvm:kvm_enter",
+ "kvm:kvm_reenter",
+ "kvm:kvm_exit",
+ "kvm:kvm_exit_gspr",
+ NULL,
+};
+
+static bool event_begin(struct evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ return exit_event_begin(evsel, sample, key);
+}
+
+static bool event_end(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+	/*
+	 * LoongArch KVM differs from other architectures:
+	 *
+	 * a kvm:kvm_reenter or kvm:kvm_enter event is adjacent to the
+	 * kvm:kvm_exit event.
+	 * kvm:kvm_enter means returning to the VMM and then to the guest;
+	 * kvm:kvm_reenter means returning to the guest immediately.
+	 */
+ return evsel__name_is(evsel, kvm_entry_trace()) ||
+ evsel__name_is(evsel, kvm_reenter_trace);
+}
+
+static void event_gspr_get_key(struct evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ unsigned int insn;
+
+ key->key = LOONGARCH_EXCEPTION_OTHERS;
+ insn = evsel__intval(evsel, sample, "inst_word");
+
+ switch (insn >> 24) {
+ case 0:
+ /* CPUCFG inst trap */
+ if ((insn >> 10) == 0x1b)
+ key->key = LOONGARCH_EXCEPTION_CPUCFG;
+ break;
+ case 4:
+ /* CSR inst trap */
+ key->key = LOONGARCH_EXCEPTION_CSR;
+ break;
+ case 6:
+ /* IOCSR inst trap */
+ if ((insn >> 15) == 0xc90)
+ key->key = LOONGARCH_EXCEPTION_IOCSR;
+ else if ((insn >> 15) == 0xc91)
+ /* Idle inst trap */
+ key->key = LOONGARCH_EXCEPTION_IDLE;
+ break;
+ default:
+ key->key = LOONGARCH_EXCEPTION_OTHERS;
+ break;
+ }
+}
+
+static const struct child_event_ops child_events[] = {
+ { .name = "kvm:kvm_exit_gspr", .get_key = event_gspr_get_key },
+ { NULL, NULL },
+};
+
+static const struct kvm_events_ops exit_events = {
+ .is_begin_event = event_begin,
+ .is_end_event = event_end,
+ .child_ops = child_events,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
+ { .name = "vmexit", .ops = &exit_events, },
+ { NULL, NULL },
+};
+
+static const char * const __kvm_skip_events[] = {
+ NULL,
+};
+
+int __cpu_isa_init_loongarch(struct perf_kvm_stat *kvm)
+{
+ kvm->exit_reasons_isa = "loongarch64";
+ kvm->exit_reasons = loongarch_exit_reasons;
+ return 0;
+}
+
+const char * const *__kvm_events_tp_loongarch(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_loongarch(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_loongarch(void)
+{
+ return __kvm_skip_events;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include "../kvm-stat.h"
+#include "../parse-events.h"
+#include "../debug.h"
+#include "../evsel.h"
+#include "../evlist.h"
+#include "../pmus.h"
+
+#include "book3s_hv_exits.h"
+#include "book3s_hcalls.h"
+#include <subcmd/parse-options.h>
+
+#define NR_TPS 4
+
+define_exit_reasons_table(hv_exit_reasons, kvm_trace_symbol_exit);
+define_exit_reasons_table(hcall_reasons, kvm_trace_symbol_hcall);
+
+/* Tracepoints specific to ppc_book3s_hv */
+static const char * const ppc_book3s_hv_kvm_tp[] = {
+ "kvm_hv:kvm_guest_enter",
+ "kvm_hv:kvm_guest_exit",
+ "kvm_hv:kvm_hcall_enter",
+ "kvm_hv:kvm_hcall_exit",
+ NULL,
+};
+
+/* 1 extra placeholder for NULL */
+static const char *__kvm_events_tp[NR_TPS + 1];
+
+static void hcall_event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->info = 0;
+ key->key = evsel__intval(evsel, sample, "req");
+}
+
+static const char *get_hcall_exit_reason(u64 exit_code)
+{
+ struct exit_reasons_table *tbl = hcall_reasons;
+
+ while (tbl->reason != NULL) {
+ if (tbl->exit_code == exit_code)
+ return tbl->reason;
+ tbl++;
+ }
+
+ pr_debug("Unknown hcall code: %lld\n",
+ (unsigned long long)exit_code);
+ return "UNKNOWN";
+}
+
+static bool hcall_event_end(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return evsel__name_is(evsel, __kvm_events_tp[3]);
+}
+
+static bool hcall_event_begin(struct evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ if (evsel__name_is(evsel, __kvm_events_tp[2])) {
+ hcall_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+static void hcall_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+ struct event_key *key,
+ char *decode)
+{
+ const char *hcall_reason = get_hcall_exit_reason(key->key);
+
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", hcall_reason);
+}
+
+static const struct kvm_events_ops hcall_events = {
+ .is_begin_event = hcall_event_begin,
+ .is_end_event = hcall_event_end,
+ .decode_key = hcall_event_decode_key,
+ .name = "HCALL-EVENT",
+};
+
+static const struct kvm_events_ops exit_events = {
+ .is_begin_event = exit_event_begin,
+ .is_end_event = exit_event_end,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
+ { .name = "vmexit", .ops = &exit_events },
+ { .name = "hcall", .ops = &hcall_events },
+ { NULL, NULL },
+};
+
+static const char * const __kvm_skip_events[] = {
+ NULL,
+};
+
+
+static int is_tracepoint_available(const char *str, struct evlist *evlist)
+{
+ struct parse_events_error err;
+ int ret;
+
+ parse_events_error__init(&err);
+ ret = parse_events(evlist, str, &err);
+ if (ret)
+ parse_events_error__print(&err, "tracepoint");
+ parse_events_error__exit(&err);
+ return ret;
+}
+
+static int ppc__setup_book3s_hv(struct perf_kvm_stat *kvm,
+ struct evlist *evlist)
+{
+ const char * const *events_ptr;
+ int i, nr_tp = 0, err = -1;
+
+ /* Check for book3s_hv tracepoints */
+ for (events_ptr = ppc_book3s_hv_kvm_tp; *events_ptr; events_ptr++) {
+ err = is_tracepoint_available(*events_ptr, evlist);
+ if (err)
+ return -1;
+ nr_tp++;
+ }
+
+ for (i = 0; i < nr_tp; i++)
+ __kvm_events_tp[i] = ppc_book3s_hv_kvm_tp[i];
+
+ __kvm_events_tp[i] = NULL;
+ kvm->exit_reasons = hv_exit_reasons;
+ kvm->exit_reasons_isa = "HV";
+
+ return 0;
+}
+
+/* Wrapper to setup kvm tracepoints */
+static int ppc__setup_kvm_tp(struct perf_kvm_stat *kvm)
+{
+ struct evlist *evlist = evlist__new();
+
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ /* Right now, only supported on book3s_hv */
+ return ppc__setup_book3s_hv(kvm, evlist);
+}
+
+int __setup_kvm_events_tp_powerpc(struct perf_kvm_stat *kvm)
+{
+ return ppc__setup_kvm_tp(kvm);
+}
+
+int __cpu_isa_init_powerpc(struct perf_kvm_stat *kvm)
+{
+ int ret;
+
+ ret = ppc__setup_kvm_tp(kvm);
+ if (ret) {
+ kvm->exit_reasons = NULL;
+ kvm->exit_reasons_isa = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * In case of powerpc architecture, pmu registers are programmable
+ * by guest kernel. So monitoring guest via host may not provide
+ * valid samples with default 'cycles' event. It is better to use
+ * 'trace_imc/trace_cycles' event for guest profiling, since it
+ * can track the guest instruction pointer in the trace-record.
+ *
+ * Function to parse the arguments and return appropriate values.
+ */
+int __kvm_add_default_arch_event_powerpc(int *argc, const char **argv)
+{
+ const char **tmp;
+ bool event = false;
+ int i, j = *argc;
+
+ const struct option event_options[] = {
+ OPT_BOOLEAN('e', "event", &event, NULL),
+ OPT_END()
+ };
+
+ tmp = calloc(j + 1, sizeof(char *));
+ if (!tmp)
+ return -EINVAL;
+
+ for (i = 0; i < j; i++)
+ tmp[i] = argv[i];
+
+ parse_options(j, tmp, event_options, NULL, PARSE_OPT_KEEP_UNKNOWN);
+ if (!event) {
+ if (perf_pmus__have_event("trace_imc", "trace_cycles")) {
+ argv[j++] = strdup("-e");
+ argv[j++] = strdup("trace_imc/trace_cycles/");
+ *argc += 2;
+ } else {
+ free(tmp);
+ return -EINVAL;
+ }
+ }
+
+ free(tmp);
+ return 0;
+}
+
+const char * const *__kvm_events_tp_powerpc(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_powerpc(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_powerpc(void)
+{
+ return __kvm_skip_events;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arch specific functions for perf kvm stat.
+ *
+ * Copyright 2024 Beijing ESWIN Computing Technology Co., Ltd.
+ *
+ */
+#include <errno.h>
+#include <memory.h>
+#include "../evsel.h"
+#include "../kvm-stat.h"
+#include "riscv_trap_types.h"
+#include "debug.h"
+
+define_exit_reasons_table(riscv_exit_reasons, kvm_riscv_trap_class);
+
+static const char * const __kvm_events_tp[] = {
+ "kvm:kvm_entry",
+ "kvm:kvm_exit",
+ NULL,
+};
+
+static void event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ int xlen = 64; // TODO: 32-bit support.
+
+ key->info = 0;
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason()) & ~CAUSE_IRQ_FLAG(xlen);
+ key->exit_reasons = riscv_exit_reasons;
+}
+
+static bool event_begin(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return evsel__name_is(evsel, kvm_entry_trace());
+}
+
+static bool event_end(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (evsel__name_is(evsel, kvm_exit_trace())) {
+ event_get_key(evsel, sample, key);
+ return true;
+ }
+ return false;
+}
+
+static const struct kvm_events_ops exit_events = {
+ .is_begin_event = event_begin,
+ .is_end_event = event_end,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
+ {
+ .name = "vmexit",
+ .ops = &exit_events,
+ },
+ { NULL, NULL },
+};
+
+static const char * const __kvm_skip_events[] = {
+ NULL,
+};
+
+int __cpu_isa_init_riscv(struct perf_kvm_stat *kvm)
+{
+ kvm->exit_reasons_isa = "riscv64";
+ return 0;
+}
+
+const char * const *__kvm_events_tp_riscv(void)
+{
+ return __kvm_events_tp;
+}
+
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_riscv(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+const char * const *__kvm_skip_events_riscv(void)
+{
+ return __kvm_skip_events;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Arch specific functions for perf kvm stat.
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
+ */
+
+#include <errno.h>
+#include <string.h>
+#include "../kvm-stat.h"
+#include "../evsel.h"
+#include "../../../arch/s390/include/uapi/asm/sie.h"
+
+define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
+define_exit_reasons_table(sie_icpt_insn_codes, icpt_insn_codes);
+define_exit_reasons_table(sie_sigp_order_codes, sigp_order_codes);
+define_exit_reasons_table(sie_diagnose_codes, diagnose_codes);
+define_exit_reasons_table(sie_icpt_prog_codes, icpt_prog_codes);
+
+static void event_icpt_insn_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ u64 insn;
+
+ insn = evsel__intval(evsel, sample, "instruction");
+ key->key = icpt_insn_decoder(insn);
+ key->exit_reasons = sie_icpt_insn_codes;
+}
+
+/* Key a SIGP child event by its order code. */
+static void event_sigp_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = evsel__intval(evsel, sample, "order_code");
+ key->exit_reasons = sie_sigp_order_codes;
+}
+
+/* Key a DIAG child event by its diagnose code. */
+static void event_diag_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = evsel__intval(evsel, sample, "code");
+ key->exit_reasons = sie_diagnose_codes;
+}
+
+/* Key a program-interrupt intercept child event by its code. */
+static void event_icpt_prog_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = evsel__intval(evsel, sample, "code");
+ key->exit_reasons = sie_icpt_prog_codes;
+}
+
+/* Child tracepoints refining a SIE exit; NULL entry terminates the list. */
+static const struct child_event_ops child_events[] = {
+ { .name = "kvm:kvm_s390_intercept_instruction",
+ .get_key = event_icpt_insn_get_key },
+ { .name = "kvm:kvm_s390_handle_sigp",
+ .get_key = event_sigp_get_key },
+ { .name = "kvm:kvm_s390_handle_diag",
+ .get_key = event_diag_get_key },
+ { .name = "kvm:kvm_s390_intercept_prog",
+ .get_key = event_icpt_prog_get_key },
+ { NULL, NULL },
+};
+
+/* VM-EXIT accounting for s390, with child ops to refine intercepts. */
+static const struct kvm_events_ops exit_events = {
+ .is_begin_event = exit_event_begin,
+ .is_end_event = exit_event_end,
+ .child_ops = child_events,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+/* Tracepoints recorded for `perf kvm stat` on s390; NULL-terminated. */
+static const char * const __kvm_events_tp[] = {
+ "kvm:kvm_s390_sie_enter",
+ "kvm:kvm_s390_sie_exit",
+ "kvm:kvm_s390_intercept_instruction",
+ "kvm:kvm_s390_handle_sigp",
+ "kvm:kvm_s390_handle_diag",
+ "kvm:kvm_s390_intercept_prog",
+ NULL,
+};
+
+/* Report modes selectable via --event; NULL entry terminates the list. */
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
+ { .name = "vmexit", .ops = &exit_events },
+ { NULL, NULL },
+};
+
+/* Event names excluded from report output; NULL-terminated. */
+static const char * const __kvm_skip_events[] = {
+ "Wait state",
+ NULL,
+};
+
+/* s390 uses the SIE intercept tables whenever the cpuid names an IBM CPU. */
+int __cpu_isa_init_s390(struct perf_kvm_stat *kvm, const char *cpuid)
+{
+ if (!strstr(cpuid, "IBM"))
+ return -ENOTSUP;
+
+ kvm->exit_reasons = sie_exit_reasons;
+ kvm->exit_reasons_isa = "SIE";
+ return 0;
+}
+
+/* Accessor for the file-local tracepoint list (avoids duplicate symbols). */
+const char * const *__kvm_events_tp_s390(void)
+{
+ return __kvm_events_tp;
+}
+
+/* Accessor for the file-local register-event ops table. */
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_s390(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+/* Accessor for the file-local list of event names to skip in reports. */
+const char * const *__kvm_skip_events_s390(void)
+{
+ return __kvm_skip_events;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <string.h>
+#include "../kvm-stat.h"
+#include "../evsel.h"
+#include "../env.h"
+#include <asm/svm.h>
+#include <asm/vmx.h>
+#include <asm/kvm.h>
+#include <subcmd/parse-options.h>
+
+define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
+define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
+
+/* VM-EXIT accounting for x86 (key decoded via vmx/svm exit-reason tables). */
+static const struct kvm_events_ops exit_events = {
+ .is_begin_event = exit_event_begin,
+ .is_end_event = exit_event_end,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+/*
+ * For the mmio events, we treat:
+ * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
+ * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
+ */
+static void mmio_event_get_key(struct evsel *evsel, struct perf_sample *sample,
+ struct event_key *key)
+{
+ /* key = guest physical address, info = access type (read/write). */
+ key->key = evsel__intval(evsel, sample, "gpa");
+ key->info = evsel__intval(evsel, sample, "type");
+}
+
+/* Values of the kvm_mmio tracepoint "type" field. */
+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
+#define KVM_TRACE_MMIO_READ 1
+#define KVM_TRACE_MMIO_WRITE 2
+
+/*
+ * An MMIO span begins either at kvm_exit (read) or at a kvm_mmio write
+ * tracepoint; only the latter carries the gpa/type key.
+ */
+static bool mmio_event_begin(struct evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ /* MMIO read begin event in kernel. */
+ if (kvm_exit_event(evsel))
+ return true;
+
+ /* MMIO write begin event in kernel. */
+ if (!evsel__name_is(evsel, "kvm:kvm_mmio"))
+ return false;
+ if (evsel__intval(evsel, sample, "type") != KVM_TRACE_MMIO_WRITE)
+ return false;
+
+ mmio_event_get_key(evsel, sample, key);
+ return true;
+}
+
+/*
+ * An MMIO span ends either at kvm_entry (write) or at a kvm_mmio read
+ * tracepoint; only the latter carries the gpa/type key.
+ */
+static bool mmio_event_end(struct evsel *evsel, struct perf_sample *sample,
+ struct event_key *key)
+{
+ /* MMIO write end event in kernel. */
+ if (kvm_entry_event(evsel))
+ return true;
+
+ /* MMIO read end event in kernel.*/
+ if (!evsel__name_is(evsel, "kvm:kvm_mmio"))
+ return false;
+ if (evsel__intval(evsel, sample, "type") != KVM_TRACE_MMIO_READ)
+ return false;
+
+ mmio_event_get_key(evsel, sample, key);
+ return true;
+}
+
+/*
+ * Render the key as "gpa:W|R". Use %#llx with an unsigned long long cast
+ * so the full 64-bit gpa survives on 32-bit builds, consistent with the
+ * ioport and msr decoders below.
+ */
+static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+ struct event_key *key,
+ char *decode)
+{
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%#llx:%s",
+ (unsigned long long)key->key,
+ key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
+}
+
+/* MMIO accounting mode, selectable via --event mmio. */
+static const struct kvm_events_ops mmio_events = {
+ .is_begin_event = mmio_event_begin,
+ .is_end_event = mmio_event_end,
+ .decode_key = mmio_event_decode_key,
+ .name = "MMIO Access"
+};
+
+ /* The time of emulation pio access is from kvm_pio to kvm_entry. */
+static void ioport_event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ /* key = port number, info = direction (rw). */
+ key->key = evsel__intval(evsel, sample, "port");
+ key->info = evsel__intval(evsel, sample, "rw");
+}
+
+/* An emulated pio access begins at the kvm_pio tracepoint. */
+static bool ioport_event_begin(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (!evsel__name_is(evsel, "kvm:kvm_pio"))
+ return false;
+
+ ioport_event_get_key(evsel, sample, key);
+ return true;
+}
+
+/* The pio span ends on re-entry to the guest. */
+static bool ioport_event_end(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+/* Render the key as "port:POUT|PIN" (out vs in). */
+static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+ struct event_key *key,
+ char *decode)
+{
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%#llx:%s",
+ (unsigned long long)key->key,
+ key->info ? "POUT" : "PIN");
+}
+
+/* IO port accounting mode, selectable via --event ioport. */
+static const struct kvm_events_ops ioport_events = {
+ .is_begin_event = ioport_event_begin,
+ .is_end_event = ioport_event_end,
+ .decode_key = ioport_event_decode_key,
+ .name = "IO Port Access"
+};
+
+ /* The time of emulation msr is from kvm_msr to kvm_entry. */
+static void msr_event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ /* key = MSR index (ecx), info = write flag. */
+ key->key = evsel__intval(evsel, sample, "ecx");
+ key->info = evsel__intval(evsel, sample, "write");
+}
+
+/* An emulated MSR access begins at the kvm_msr tracepoint. */
+static bool msr_event_begin(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (!evsel__name_is(evsel, "kvm:kvm_msr"))
+ return false;
+
+ msr_event_get_key(evsel, sample, key);
+ return true;
+}
+
+/* The MSR span ends on re-entry to the guest. */
+static bool msr_event_end(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+/* Render the key as "msr:W|R". */
+static void msr_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+ struct event_key *key,
+ char *decode)
+{
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%#llx:%s",
+ (unsigned long long)key->key,
+ key->info ? "W" : "R");
+}
+
+/* MSR accounting mode, selectable via --event msr. */
+static const struct kvm_events_ops msr_events = {
+ .is_begin_event = msr_event_begin,
+ .is_end_event = msr_event_end,
+ .decode_key = msr_event_decode_key,
+ .name = "MSR Access"
+};
+
+/* Tracepoints recorded for `perf kvm stat` on x86; NULL-terminated. */
+static const char * const __kvm_events_tp[] = {
+ "kvm:kvm_entry",
+ "kvm:kvm_exit",
+ "kvm:kvm_mmio",
+ "kvm:kvm_pio",
+ "kvm:kvm_msr",
+ NULL,
+};
+
+/* Report modes selectable via --event; NULL entry terminates the list. */
+static const struct kvm_reg_events_ops __kvm_reg_events_ops[] = {
+ { .name = "vmexit", .ops = &exit_events },
+ { .name = "mmio", .ops = &mmio_events },
+ { .name = "ioport", .ops = &ioport_events },
+ { .name = "msr", .ops = &msr_events },
+ { NULL, NULL },
+};
+
+/* Event names excluded from report output; NULL-terminated. */
+static const char * const __kvm_skip_events[] = {
+ "HLT",
+ NULL,
+};
+
+/*
+ * Select VMX (Intel) or SVM (AMD/Hygon) exit-reason tables from the
+ * cpuid vendor string; any other vendor is unsupported.
+ */
+int __cpu_isa_init_x86(struct perf_kvm_stat *kvm, const char *cpuid)
+{
+ struct exit_reasons_table *reasons = NULL;
+ const char *isa = NULL;
+
+ if (strstr(cpuid, "Intel")) {
+ reasons = vmx_exit_reasons;
+ isa = "VMX";
+ } else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) {
+ reasons = svm_exit_reasons;
+ isa = "SVM";
+ }
+ if (!isa)
+ return -ENOTSUP;
+
+ kvm->exit_reasons = reasons;
+ kvm->exit_reasons_isa = isa;
+ return 0;
+}
+
+/*
+ * After KVM supports PEBS for guest on Intel platforms
+ * (https://lore.kernel.org/all/20220411101946.20262-1-likexu@tencent.com/),
+ * host loses the capability to sample guest with PEBS since all PEBS related
+ * MSRs are switched to guest value after vm-entry, like IA32_DS_AREA MSR is
+ * switched to guest GVA at vm-entry. This would lead to "perf kvm record"
+ * fails to sample guest on Intel platforms since "cycles:P" event is used to
+ * sample guest by default.
+ *
+ * So, to avoid this issue explicitly use "cycles" instead of "cycles:P" event
+ * by default to sample guest on Intel platforms.
+ */
+int __kvm_add_default_arch_event_x86(int *argc, const char **argv)
+{
+ const char **tmp;
+ bool event = false;
+ int ret = 0, i, j = *argc;
+
+ /* Only used to detect whether the user already passed an event option. */
+ const struct option event_options[] = {
+ OPT_BOOLEAN('e', "event", &event, NULL),
+ OPT_BOOLEAN(0, "pfm-events", &event, NULL),
+ OPT_END()
+ };
+
+ if (!x86__is_intel_cpu())
+ return 0;
+
+ /* Scan a copy of argv, since parse_options() may permute its input. */
+ tmp = calloc(j + 1, sizeof(char *));
+ if (!tmp)
+ return -ENOMEM;
+
+ for (i = 0; i < j; i++)
+ tmp[i] = argv[i];
+
+ parse_options(j, tmp, event_options, NULL, PARSE_OPT_KEEP_UNKNOWN);
+ if (!event) {
+ /*
+ * Append "-e cycles". Assumes the caller sized argv with room
+ * for two extra entries -- TODO confirm at the call site.
+ */
+ argv[j++] = STRDUP_FAIL_EXIT("-e");
+ argv[j++] = STRDUP_FAIL_EXIT("cycles");
+ *argc += 2;
+ }
+
+ free(tmp);
+ return 0;
+
+/*
+ * NOTE(review): ret and this label are only reachable through
+ * STRDUP_FAIL_EXIT()'s allocation-failure path -- confirm the macro
+ * sets ret and jumps here on strdup() failure.
+ */
+EXIT:
+ free(tmp);
+ return ret;
+}
+
+/* Accessor for the file-local tracepoint list (avoids duplicate symbols). */
+const char * const *__kvm_events_tp_x86(void)
+{
+ return __kvm_events_tp;
+}
+
+/* Accessor for the file-local register-event ops table. */
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_x86(void)
+{
+ return __kvm_reg_events_ops;
+}
+
+/* Accessor for the file-local list of event names to skip in reports. */
+const char * const *__kvm_skip_events_x86(void)
+{
+ return __kvm_skip_events;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#ifndef ARCH_PERF_RISCV_TRAP_TYPES_H
+#define ARCH_PERF_RISCV_TRAP_TYPES_H
+
+/*
+ * Exception cause high bit - is an interrupt if set.
+ * NOTE(review): relies on _AC(); confirm users of this header pull in
+ * <linux/const.h> (or an equivalent uapi header) for that macro.
+ */
+#define CAUSE_IRQ_FLAG(xlen) (_AC(1, UL) << (xlen - 1))
+
+/* Interrupt causes (minus the high bit) */
+#define IRQ_S_SOFT 1
+#define IRQ_VS_SOFT 2
+#define IRQ_M_SOFT 3
+#define IRQ_S_TIMER 5
+#define IRQ_VS_TIMER 6
+#define IRQ_M_TIMER 7
+#define IRQ_S_EXT 9
+#define IRQ_VS_EXT 10
+#define IRQ_M_EXT 11
+#define IRQ_S_GEXT 12
+#define IRQ_PMU_OVF 13
+
+/* Exception causes */
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_INST_ILLEGAL 2
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_MISALIGNED 4
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_MISALIGNED 6
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_HYPERVISOR_SYSCALL 9
+#define EXC_SUPERVISOR_SYSCALL 10
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+#define EXC_INST_GUEST_PAGE_FAULT 20
+#define EXC_LOAD_GUEST_PAGE_FAULT 21
+#define EXC_VIRTUAL_INST_FAULT 22
+#define EXC_STORE_GUEST_PAGE_FAULT 23
+
+#define TRAP(x) { x, #x }
+
+#define kvm_riscv_trap_class \
+ TRAP(IRQ_S_SOFT), TRAP(IRQ_VS_SOFT), TRAP(IRQ_M_SOFT), \
+ TRAP(IRQ_S_TIMER), TRAP(IRQ_VS_TIMER), TRAP(IRQ_M_TIMER), \
+ TRAP(IRQ_S_EXT), TRAP(IRQ_VS_EXT), TRAP(IRQ_M_EXT), \
+ TRAP(IRQ_S_GEXT), TRAP(IRQ_PMU_OVF), \
+ TRAP(EXC_INST_MISALIGNED), TRAP(EXC_INST_ACCESS), TRAP(EXC_INST_ILLEGAL), \
+ TRAP(EXC_BREAKPOINT), TRAP(EXC_LOAD_MISALIGNED), TRAP(EXC_LOAD_ACCESS), \
+ TRAP(EXC_STORE_MISALIGNED), TRAP(EXC_STORE_ACCESS), TRAP(EXC_SYSCALL), \
+ TRAP(EXC_HYPERVISOR_SYSCALL), TRAP(EXC_SUPERVISOR_SYSCALL), \
+ TRAP(EXC_INST_PAGE_FAULT), TRAP(EXC_LOAD_PAGE_FAULT), \
+ TRAP(EXC_STORE_PAGE_FAULT), TRAP(EXC_INST_GUEST_PAGE_FAULT), \
+ TRAP(EXC_LOAD_GUEST_PAGE_FAULT), TRAP(EXC_VIRTUAL_INST_FAULT), \
+ TRAP(EXC_STORE_GUEST_PAGE_FAULT)
+
+#endif /* ARCH_PERF_RISCV_TRAP_TYPES_H */
#include "debug.h"
#include "evsel.h"
#include "kvm-stat.h"
-
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#include <dwarf-regs.h>
bool kvm_exit_event(struct evsel *evsel)
{
- return evsel__name_is(evsel, kvm_exit_trace);
+ return evsel__name_is(evsel, kvm_exit_trace());
}
void exit_event_get_key(struct evsel *evsel,
struct event_key *key)
{
key->info = 0;
- key->key = evsel__intval(evsel, sample, kvm_exit_reason);
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason());
}
bool kvm_entry_event(struct evsel *evsel)
{
- return evsel__name_is(evsel, kvm_entry_trace);
+ return evsel__name_is(evsel, kvm_entry_trace());
}
bool exit_event_end(struct evsel *evsel,
scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", exit_reason);
}
-#endif
+/* Only powerpc needs runtime tracepoint setup; all other hosts need none. */
+int setup_kvm_events_tp(struct perf_kvm_stat *kvm)
+{
+ switch (EM_HOST) {
+ case EM_PPC:
+ case EM_PPC64:
+ return __setup_kvm_events_tp_powerpc(kvm);
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Dispatch ISA-specific initialization by ELF machine. EM_HOST selects
+ * the build host for now; cpuid is only consulted on s390 and x86.
+ */
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return __cpu_isa_init_arm64(kvm);
+ case EM_LOONGARCH:
+ return __cpu_isa_init_loongarch(kvm);
+ case EM_PPC:
+ case EM_PPC64:
+ return __cpu_isa_init_powerpc(kvm);
+ case EM_RISCV:
+ return __cpu_isa_init_riscv(kvm);
+ case EM_S390:
+ return __cpu_isa_init_s390(kvm, cpuid);
+ case EM_X86_64:
+ case EM_386:
+ return __cpu_isa_init_x86(kvm, cpuid);
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return -1;
+ }
+}
+
+/* Name of the tracepoint field holding the vcpu id on this host. */
+const char *vcpu_id_str(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ case EM_RISCV:
+ case EM_S390:
+ return "id";
+ case EM_LOONGARCH:
+ case EM_PPC:
+ case EM_PPC64:
+ case EM_X86_64:
+ case EM_386:
+ return "vcpu_id";
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+/* Name of the exit tracepoint field carrying the exit reason. */
+const char *kvm_exit_reason(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return "ret";
+ case EM_LOONGARCH:
+ return "reason";
+ case EM_PPC:
+ case EM_PPC64:
+ return "trap";
+ case EM_RISCV:
+ return "scause";
+ case EM_S390:
+ return "icptcode";
+ case EM_X86_64:
+ case EM_386:
+ return "exit_reason";
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+/* Name of the guest-entry tracepoint on this host. */
+const char *kvm_entry_trace(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ case EM_RISCV:
+ case EM_X86_64:
+ case EM_386:
+ return "kvm:kvm_entry";
+ case EM_LOONGARCH:
+ return "kvm:kvm_enter";
+ case EM_PPC:
+ case EM_PPC64:
+ return "kvm_hv:kvm_guest_enter";
+ case EM_S390:
+ return "kvm:kvm_s390_sie_enter";
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+/* Name of the guest-exit tracepoint on this host. */
+const char *kvm_exit_trace(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ case EM_LOONGARCH:
+ case EM_RISCV:
+ case EM_X86_64:
+ case EM_386:
+ return "kvm:kvm_exit";
+ case EM_PPC:
+ case EM_PPC64:
+ return "kvm_hv:kvm_guest_exit";
+ case EM_S390:
+ return "kvm:kvm_s390_sie_exit";
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+/* NULL-terminated tracepoint list for the selected host architecture. */
+const char * const *kvm_events_tp(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return __kvm_events_tp_arm64();
+ case EM_LOONGARCH:
+ return __kvm_events_tp_loongarch();
+ case EM_PPC:
+ case EM_PPC64:
+ return __kvm_events_tp_powerpc();
+ case EM_RISCV:
+ return __kvm_events_tp_riscv();
+ case EM_S390:
+ return __kvm_events_tp_s390();
+ case EM_X86_64:
+ case EM_386:
+ return __kvm_events_tp_x86();
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+/* Register-event ops table for the selected host architecture. */
+const struct kvm_reg_events_ops *kvm_reg_events_ops(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return __kvm_reg_events_ops_arm64();
+ case EM_LOONGARCH:
+ return __kvm_reg_events_ops_loongarch();
+ case EM_PPC:
+ case EM_PPC64:
+ return __kvm_reg_events_ops_powerpc();
+ case EM_RISCV:
+ return __kvm_reg_events_ops_riscv();
+ case EM_S390:
+ return __kvm_reg_events_ops_s390();
+ case EM_X86_64:
+ case EM_386:
+ return __kvm_reg_events_ops_x86();
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+/* Skip-event name list for the selected host architecture. */
+const char * const *kvm_skip_events(void)
+{
+ switch (EM_HOST) {
+ case EM_AARCH64:
+ return __kvm_skip_events_arm64();
+ case EM_LOONGARCH:
+ return __kvm_skip_events_loongarch();
+ case EM_PPC:
+ case EM_PPC64:
+ return __kvm_skip_events_powerpc();
+ case EM_RISCV:
+ return __kvm_skip_events_riscv();
+ case EM_S390:
+ return __kvm_skip_events_s390();
+ case EM_X86_64:
+ case EM_386:
+ return __kvm_skip_events_x86();
+ default:
+ pr_err("Unsupported kvm-stat host %d\n", EM_HOST);
+ return NULL;
+ }
+}
+
+/* Only powerpc and x86 adjust the default record event list. */
+int kvm_add_default_arch_event(int *argc, const char **argv)
+{
+ switch (EM_HOST) {
+ case EM_PPC:
+ case EM_PPC64:
+ return __kvm_add_default_arch_event_powerpc(argc, argv);
+ case EM_X86_64:
+ case EM_386:
+ return __kvm_add_default_arch_event_x86(argc, argv);
+ default:
+ return 0;
+ }
+}
#ifndef __PERF_KVM_STAT_H
#define __PERF_KVM_STAT_H
-#ifdef HAVE_KVM_STAT_SUPPORT
-
#include "tool.h"
#include "sort.h"
#include "stat.h"
struct event_key *key);
bool (*is_end_event)(struct evsel *evsel,
struct perf_sample *sample, struct event_key *key);
- struct child_event_ops *child_ops;
+ const struct child_event_ops *child_ops;
void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
char *decode);
const char *name;
struct exit_reasons_table *exit_reasons;
const char *exit_reasons_isa;
- struct kvm_events_ops *events_ops;
+ const struct kvm_events_ops *events_ops;
u64 total_time;
u64 total_count;
struct kvm_reg_events_ops {
const char *name;
- struct kvm_events_ops *ops;
+ const struct kvm_events_ops *ops;
};
-#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#ifdef HAVE_LIBTRACEEVENT
void exit_event_get_key(struct evsel *evsel,
struct perf_sample *sample,
void exit_event_decode_key(struct perf_kvm_stat *kvm,
struct event_key *key,
char *decode);
-#endif
bool kvm_exit_event(struct evsel *evsel);
bool kvm_entry_event(struct evsel *evsel);
-int setup_kvm_events_tp(struct perf_kvm_stat *kvm);
#define define_exit_reasons_table(name, symbols) \
static struct exit_reasons_table name[] = { \
/*
* arch specific callbacks and data structures
*/
+int setup_kvm_events_tp(struct perf_kvm_stat *kvm);
+int __setup_kvm_events_tp_powerpc(struct perf_kvm_stat *kvm);
+
int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid);
+int __cpu_isa_init_arm64(struct perf_kvm_stat *kvm);
+int __cpu_isa_init_loongarch(struct perf_kvm_stat *kvm);
+int __cpu_isa_init_powerpc(struct perf_kvm_stat *kvm);
+int __cpu_isa_init_riscv(struct perf_kvm_stat *kvm);
+int __cpu_isa_init_s390(struct perf_kvm_stat *kvm, const char *cpuid);
+int __cpu_isa_init_x86(struct perf_kvm_stat *kvm, const char *cpuid);
+
+const char *vcpu_id_str(void);
+const char *kvm_exit_reason(void);
+const char *kvm_entry_trace(void);
+const char *kvm_exit_trace(void);
+
+const char * const *kvm_events_tp(void);
+const char * const *__kvm_events_tp_arm64(void);
+const char * const *__kvm_events_tp_loongarch(void);
+const char * const *__kvm_events_tp_powerpc(void);
+const char * const *__kvm_events_tp_riscv(void);
+const char * const *__kvm_events_tp_s390(void);
+const char * const *__kvm_events_tp_x86(void);
+
+const struct kvm_reg_events_ops *kvm_reg_events_ops(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_arm64(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_loongarch(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_powerpc(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_riscv(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_s390(void);
+const struct kvm_reg_events_ops *__kvm_reg_events_ops_x86(void);
+
+const char * const *kvm_skip_events(void);
+const char * const *__kvm_skip_events_arm64(void);
+const char * const *__kvm_skip_events_loongarch(void);
+const char * const *__kvm_skip_events_powerpc(void);
+const char * const *__kvm_skip_events_riscv(void);
+const char * const *__kvm_skip_events_s390(void);
+const char * const *__kvm_skip_events_x86(void);
+
+int kvm_add_default_arch_event(int *argc, const char **argv);
+int __kvm_add_default_arch_event_powerpc(int *argc, const char **argv);
+int __kvm_add_default_arch_event_x86(int *argc, const char **argv);
+
+#else /* !HAVE_LIBTRACEEVENT */
+
+/* No-op fallback when built without libtraceevent: nothing to add. */
+static inline int kvm_add_default_arch_event(int *argc __maybe_unused,
+ const char **argv __maybe_unused)
+{
+ return 0;
+}
-extern const char *kvm_events_tp[];
-extern struct kvm_reg_events_ops kvm_reg_events_ops[];
-extern const char * const kvm_skip_events[];
-extern const char *vcpu_id_str;
-extern const char *kvm_exit_reason;
-extern const char *kvm_entry_trace;
-extern const char *kvm_exit_trace;
+#endif /* HAVE_LIBTRACEEVENT */
static inline struct kvm_info *kvm_info__get(struct kvm_info *ki)
{
return ki;
}
-#else /* HAVE_KVM_STAT_SUPPORT */
-// We use this unconditionally in hists__findnew_entry() and hist_entry__delete()
-#define kvm_info__zput(ki) do { } while (0)
-#endif /* HAVE_KVM_STAT_SUPPORT */
-
#define STRDUP_FAIL_EXIT(s) \
({ char *_p; \
_p = strdup(s); \
_p; \
})
-extern int kvm_add_default_arch_event(int *argc, const char **argv);
#endif /* __PERF_KVM_STAT_H */