#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
+#include "util/mmap.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
+#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
+#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
trigger_is_ready(&switch_output_trigger);
}
-static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
+static int record__write(struct record *rec, struct mmap *map __maybe_unused,
void *bf, size_t size)
{
struct perf_data_file *file = &rec->session->data->file;
return rc;
}
-static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
+static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
void *rem_buf;
off_t rem_off;
return rc;
}
-static int record__aio_sync(struct perf_mmap *md, bool sync_all)
+static int record__aio_sync(struct mmap *md, bool sync_all)
{
struct aiocb **aiocb = md->aio.aiocb;
struct aiocb *cblocks = md->aio.cblocks;
size_t size;
};
-static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
+static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
struct record_aio *aio = to;
/*
- * map->base data pointed by buf is copied into free map->aio.data[] buffer
+ * map->core.base data pointed to by buf is copied into a free map->aio.data[] buffer
* to release space in the kernel buffer as fast as possible, calling
* perf_mmap__consume() from the perf_mmap__push() function.
*
return size;
}
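/*
 * Sketch of the pushfn contract used above (illustrative, not part of
 * the patch; assumes <unistd.h> for write()): perf_mmap__push() hands
 * each contiguous chunk of ring data to the callback, which returns a
 * negative value to abort the push and a non-negative value on success.
 */
static int example_pushfn(struct mmap *map __maybe_unused, void *to,
			  void *buf, size_t size)
{
	int fd = *(int *)to;	/* caller-supplied cookie passed through */

	if (write(fd, buf, size) != (ssize_t)size)
		return -1;	/* propagated as the push error */
	return size;
}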
-static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
+static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
int ret, idx;
int trace_fd = rec->session->data->file.fd;
{
int i;
struct evlist *evlist = rec->evlist;
- struct perf_mmap *maps = evlist->mmap;
+ struct mmap *maps = evlist->mmap;
if (!record__aio_enabled(rec))
return;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- struct perf_mmap *map = &maps[i];
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &maps[i];
- if (map->base)
+ if (map->core.base)
record__aio_sync(map, true);
}
}
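/*
 * Sketch of the layout implied by the accessor changes above (the field
 * list is an assumption, and the name is hypothetical): the tools/perf
 * struct mmap embeds the generic libperf ring as its first member, so
 * shared state moves behind ->core while tool-only state stays on top.
 */
struct mmap_sketch {
	struct perf_mmap	core;		/* libperf: base, flush, ... */
	struct auxtrace_mmap	auxtrace_mmap;	/* tools/perf only */
	cpu_set_t		affinity_mask;	/* tools/perf only */
};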
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;
-static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
+static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
off_t *off __maybe_unused)
{
return -1;
if (!opts->mmap_flush)
opts->mmap_flush = MMAP_FLUSH_DEFAULT;
- flush_max = perf_evlist__mmap_size(opts->mmap_pages);
+ flush_max = evlist__mmap_size(opts->mmap_pages);
flush_max /= 4;
if (opts->mmap_flush > flush_max)
opts->mmap_flush = flush_max;
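/*
 * Sketch of the clamp above with illustrative numbers: for a 512 KiB
 * ring, flush_max = 512 KiB / 4 = 128 KiB, so a requested --mmap-flush
 * of 256 KiB is silently reduced to 128 KiB.
 */
static size_t clamp_mmap_flush(size_t mmap_bytes, size_t requested)
{
	size_t flush_max = mmap_bytes / 4;	/* at most 1/4 of the ring */

	return requested > flush_max ? flush_max : requested;
}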
return record__write(rec, NULL, event, event->header.size);
}
-static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
+static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
struct record *rec = to;
#ifdef HAVE_AUXTRACE_SUPPORT
static int record__process_auxtrace(struct perf_tool *tool,
- struct perf_mmap *map,
+ struct mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2)
{
}
static int record__auxtrace_mmap_read(struct record *rec,
- struct perf_mmap *map)
+ struct mmap *map)
{
int ret;
}
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
- struct perf_mmap *map)
+ struct mmap *map)
{
int ret;
int i;
int rc = 0;
- for (i = 0; i < rec->evlist->nr_mmaps; i++) {
- struct perf_mmap *map = &rec->evlist->mmap[i];
+ for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &rec->evlist->mmap[i];
if (!map->auxtrace_mmap.base)
continue;
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
- struct perf_mmap *map __maybe_unused)
+ struct mmap *map __maybe_unused)
{
return 0;
}
if (opts->affinity != PERF_AFFINITY_SYS)
cpu__setup_cpunode_map();
- if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
+ if (evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode,
opts->nr_cblocks, opts->affinity,
if (perf_evlist__add_dummy(evlist))
return -ENOMEM;
- pos = perf_evlist__first(evlist);
+ pos = evlist__first(evlist);
pos->tracking = 0;
- pos = perf_evlist__last(evlist);
+ pos = evlist__last(evlist);
pos->tracking = 1;
pos->core.attr.enable_on_exec = 1;
}
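/*
 * Sketch of the tracking handoff above (hypothetical helper packaging
 * the same steps): side-band records (mmap, comm, task) move from the
 * first real event to the trailing dummy event, so they keep flowing no
 * matter what happens to the real events; enable_on_exec arms the dummy
 * at exec time.
 */
static void move_tracking_to_dummy(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct evsel *dummy = evlist__last(evlist);	/* the added dummy */

	first->tracking = 0;
	dummy->tracking = 1;
	dummy->core.attr.enable_on_exec = 1;
}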
pos->supported = true;
}
+ if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
+ pr_warning(
+"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
+"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
+"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
+"file is not found in the buildid cache or in the vmlinux path.\n\n"
+"Samples in kernel modules won't be resolved at all.\n\n"
+"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
+"even with a suitable vmlinux or kallsyms file.\n\n");
+ }
+
if (perf_evlist__apply_filters(evlist, &pos)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
pos->filter, perf_evsel__name(pos), errno,
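/*
 * Sketch (not from the patch; assumes <stdio.h>): probing the sysctl
 * named in the warning above. A non-zero value means kernel addresses
 * in /proc/kallsyms are hidden from this process.
 */
static int read_kptr_restrict(void)
{
	FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}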
.type = PERF_RECORD_FINISHED_ROUND,
};
-static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
+static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
if (rec->opts.affinity != PERF_AFFINITY_SYS &&
!CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
u64 bytes_written = rec->bytes_written;
int i;
int rc = 0;
- struct perf_mmap *maps;
+ struct mmap *maps;
int trace_fd = rec->data.file.fd;
off_t off = 0;
if (record__aio_enabled(rec))
off = record__aio_get_pos(trace_fd);
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
u64 flush = 0;
- struct perf_mmap *map = &maps[i];
+ struct mmap *map = &maps[i];
- if (map->base) {
+ if (map->core.base) {
record__adjust_affinity(rec, map);
if (synch) {
- flush = map->flush;
- map->flush = 1;
+ flush = map->core.flush;
+ map->core.flush = 1;
}
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) < 0) {
if (synch)
- map->flush = flush;
+ map->core.flush = flush;
rc = -1;
goto out;
}
if (record__aio_push(rec, map, &off) < 0) {
record__aio_set_pos(trace_fd, off);
if (synch)
- map->flush = flush;
+ map->core.flush = flush;
rc = -1;
goto out;
}
}
if (synch)
- map->flush = flush;
+ map->core.flush = flush;
}
if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
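/*
 * Sketch of the synch path above: dropping the flush threshold to 1
 * forces even a nearly empty ring to be pushed out, after which the
 * user-configured value is restored. push_one_map() is a hypothetical
 * stand-in for the perf_mmap__push()/record__aio_push() calls above.
 */
static int drain_map_synchronously(struct record *rec, struct mmap *map)
{
	u64 saved = map->core.flush;
	int rc;

	map->core.flush = 1;		/* flush everything, however little */
	rc = push_one_map(rec, map);	/* hypothetical helper */
	map->core.flush = saved;
	return rc;
}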
static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);
-int __weak
-perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
- struct perf_tool *tool __maybe_unused,
- perf_event__handler_t process __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- return 0;
-}
-
static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct evlist *evlist)
{
if (evlist) {
- if (evlist->mmap && evlist->mmap[0].base)
- return evlist->mmap[0].base;
- if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
- return evlist->overwrite_mmap[0].base;
+ if (evlist->mmap && evlist->mmap[0].core.base)
+ return evlist->mmap[0].core.base;
+ if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
+ return evlist->overwrite_mmap[0].core.base;
}
return NULL;
}
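/*
 * Sketch of what the picked control page provides: struct
 * perf_event_mmap_page carries the cycles-to-nanoseconds conversion
 * that perf_event__synth_time_conv() synthesizes into the trace. The
 * formula below follows the cap_user_time documentation in
 * <linux/perf_event.h>; this is a sketch, not the tree's implementation.
 */
static u64 mmap_page_cyc_to_ns(const struct perf_event_mmap_page *pc, u64 cyc)
{
	u64 quot = cyc >> pc->time_shift;
	u64 rem  = cyc & (((u64)1 << pc->time_shift) - 1);

	return pc->time_zero + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}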
}
session = perf_session__new(data, false, tool);
- if (session == NULL) {
+ if (IS_ERR(session)) {
pr_err("Perf session creation failed.\n");
- return -1;
+ return PTR_ERR(session);
}
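/*
 * Sketch of the <linux/err.h> convention adopted above (mirrors
 * tools/include/linux/err.h): an errno is folded into the pointer value
 * itself, so one return slot carries either a valid pointer or a
 * -errno that the caller can propagate, as PTR_ERR(session) does.
 */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline bool IS_ERR(const void *p)  { return IS_ERR_VALUE((unsigned long)p); }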
fd = perf_data__fd(data);
err = -1;
goto out_child;
}
- session->header.env.comp_mmap_len = session->evlist->mmap_len;
+ session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
err = bpf__apply_obj_config();
if (err) {
if (hits == rec->samples) {
if (done || draining)
break;
- err = perf_evlist__poll(rec->evlist, -1);
+ err = evlist__poll(rec->evlist, -1);
/*
* Propagate error, only if there's any. Ignore positive
* number of returned events and interrupt error.
err = 0;
waking++;
- if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
+ if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
draining = true;
}
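/*
 * Sketch of the drain pattern above using raw poll(2) in place of the
 * evlist helpers (assumes <poll.h>; fds/nfds are hypothetical
 * stand-ins): once the workload exits, descriptors report
 * POLLERR/POLLHUP and are dropped; when none are left, the caller
 * switches to draining and stops, as the code above does.
 */
static int poll_and_filter(struct pollfd *fds, int nfds)
{
	int i, alive = 0;

	poll(fds, nfds, -1);			/* wait for data or hangup */
	for (i = 0; i < nfds; i++) {
		if (fds[i].revents & (POLLERR | POLLHUP))
			fds[i].fd = -1;		/* poll(2) ignores fd < 0 */
		else
			alive++;
	}
	return alive;	/* 0 => start draining, like the code above */
}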
static void switch_output_size_warn(struct record *rec)
{
- u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
+ u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
struct switch_output *s = &rec->switch_output;
wakeup_size /= 2;
err = -ENOMEM;
- if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
- pr_warning(
-"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
-"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
-"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
-"file is not found in the buildid cache or in the vmlinux path.\n\n"
-"Samples in kernel modules won't be resolved at all.\n\n"
-"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
-"even with a suitable vmlinux or kallsyms file.\n\n");
-
if (rec->no_buildid_cache || rec->no_buildid) {
disable_buildid_cache();
} else if (rec->switch_output.enabled) {