/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <asm/bug.h>
#include <linux/kernel.h>
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "util.h" /* page_size */

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}
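
/*
 * Layout note: the mapping is one control page (the kernel's struct
 * perf_event_mmap_page, where the 'head' and 'tail' pointers live)
 * followed by mask + 1 bytes of data, hence the extra page_size above.
 */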

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
					 u64 start, u64 end, u64 *prev)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > map->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & map->mask) + size != ((start + size) & map->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			/* Stitch the two halves together in event_copy. */
			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}
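
/*
 * Worked example of the wrap-around copy above (an illustration, not
 * from the original file): with a 64KiB data area, mask = 0xffff. For
 * start = 0xfff8 and size = 0x10, (start & mask) + size = 0x10008 but
 * ((start + size) & mask) = 0x8, so the record straddles the boundary.
 * The first memcpy takes min(0x10000 - 0xfff8, 0x10) = 8 bytes from the
 * end of the buffer, the second takes the remaining 8 from offset 0,
 * and 'event' ends up pointing at the stitched copy in map->event_copy.
 */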

union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
{
	u64 head;
	u64 old = map->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);

	return perf_mmap__read(map, check_messup, old, head, &map->prev);
}

union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
{
	u64 head, end;
	u64 start = map->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	head = perf_mmap__read_head(map);
	if (!head)
		return NULL;

	/*
	 * 'head' starts at 0 and the kernel subtracts sizeof(record) from
	 * it on every write, so 'head' is in effect a negative offset.
	 * 'end' is built by hand by adding the ring buffer size to 'head',
	 * which assumes the whole ring buffer holds valid data. If 'end'
	 * comes out positive, the ring buffer has not wrapped yet, so
	 * 'end' must be clamped to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Instead, compare '-head' (the
	 * number of bytes the kernel has written) against the size of
	 * the ring buffer.
	 */
	if (-head < (u64)(map->mask + 1))
		end = 0;
	else
		end = head + map->mask + 1;

	return perf_mmap__read(map, false, start, end, &map->prev);
}

void perf_mmap__read_catchup(struct perf_mmap *map)
{
	u64 head;

	if (!refcount_read(&map->refcnt))
		return;

	head = perf_mmap__read_head(map);
	map->prev = head;
}
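
/*
 * Illustrative sketch, not part of the original file: how a caller
 * drains an overwrite ("backward") ring buffer, in the spirit of the
 * backward-ring-buffer test. The process_event callback stands in for
 * whatever the tool does with each record.
 */
static void __maybe_unused drain_backward_example(struct perf_mmap *map,
		void (*process_event)(union perf_event *event))
{
	union perf_event *event;

	/* Snapshot the kernel's current 'head' into map->prev. */
	perf_mmap__read_catchup(map);

	while ((event = perf_mmap__read_backward(map)) != NULL)
		process_event(event);

	/* Overwrite mode: nothing to write back to the tail. */
	perf_mmap__consume(map, true);
}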

static bool perf_mmap__empty(struct perf_mmap *map)
{
	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
{
	if (!overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
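
/*
 * Illustrative sketch, not part of the original file: the usual forward
 * (non-overwrite) read loop as the evlist layer drives it. Consuming
 * after each record publishes the new tail so the kernel can reuse the
 * space. process_event is again a hypothetical consumer.
 */
static void __maybe_unused drain_forward_example(struct perf_mmap *map,
		void (*process_event)(union perf_event *event))
{
	union perf_event *event;

	while ((event = perf_mmap__read_forward(map, false)) != NULL) {
		process_event(event);
		perf_mmap__consume(map, false);
	}
}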

/*
 * Default no-op stubs for AUX area tracing; the strong versions in
 * auxtrace.c override these when auxtrace support is built in.
 */
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
{
	/*
	 * A map needs two puts to be freed. The last one will be done at
	 * perf_evlist__mmap_consume(), so that we make sure we don't prevent
	 * tools from consuming every last event in the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
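
/*
 * Illustrative sketch, not part of the original file: how a caller
 * might fill in struct mmap_params. The data area must be a power-of-two
 * size so offsets can be wrapped with 'mask'; 128 pages and a read-write
 * mapping are assumptions for the example (overwrite mode maps with
 * PROT_READ only).
 */
static int __maybe_unused mmap_example(struct perf_mmap *map, int fd)
{
	struct mmap_params mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = 128 * page_size - 1,	/* 128 data pages */
	};

	return perf_mmap__mmap(map, &mp, fd);
}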

static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading backward ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading backward ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

static int rb_find_range(void *data, int mask, u64 head, u64 old,
			 u64 *start, u64 *end, bool backward)
{
	if (!backward) {
		*start = old;
		*end = head;
		return 0;
	}

	return backward_rb_find_range(data, mask, head, start, end);
}

int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
		    void *to, int push(void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
		return -1;

	if (start == end)
		return 0;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

		md->prev = head;
		perf_mmap__consume(md, overwrite || backward);
		return 0;
	}

	/*
	 * The data wraps around the end of the buffer: push the chunk up
	 * to the end first, then the remainder from the start.
	 */
	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (push(to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (push(to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite || backward);
out:
	return rc;
}
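
/*
 * Illustrative sketch, not part of the original file: the simplest
 * possible push() callback, which only counts the bytes drained from
 * the ring buffer ('perf record' uses a callback of this shape to
 * append the chunks to perf.data). Usage would look like:
 *
 *	u64 bytes = 0;
 *	perf_mmap__push(md, false, false, &bytes, count_bytes);
 */
static int __maybe_unused count_bytes(void *to, void *buf __maybe_unused,
				      size_t size)
{
	*(u64 *)to += size;	/* 'to' points to a u64 accumulator */
	return 0;		/* non-negative: keep pushing */
}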