kernel/trace/ring_buffer.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic ring buffer
4  *
5  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6  */
7 #include <linux/trace_events.h>
8 #include <linux/ring_buffer.h>
9 #include <linux/trace_clock.h>
10 #include <linux/sched/clock.h>
11 #include <linux/trace_seq.h>
12 #include <linux/spinlock.h>
13 #include <linux/irq_work.h>
14 #include <linux/uaccess.h>
15 #include <linux/hardirq.h>
16 #include <linux/kthread.h>      /* for self test */
17 #include <linux/module.h>
18 #include <linux/percpu.h>
19 #include <linux/mutex.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/hash.h>
24 #include <linux/list.h>
25 #include <linux/cpu.h>
26 #include <linux/oom.h>
27
28 #include <asm/local.h>
29
30 static void update_pages_handler(struct work_struct *work);
31
32 /*
33  * The ring buffer header is special. We must manually keep it up to date.
34  */
35 int ring_buffer_print_entry_header(struct trace_seq *s)
36 {
37         trace_seq_puts(s, "# compressed entry header\n");
38         trace_seq_puts(s, "\ttype_len    :    5 bits\n");
39         trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
40         trace_seq_puts(s, "\tarray       :   32 bits\n");
41         trace_seq_putc(s, '\n');
42         trace_seq_printf(s, "\tpadding     : type == %d\n",
43                          RINGBUF_TYPE_PADDING);
44         trace_seq_printf(s, "\ttime_extend : type == %d\n",
45                          RINGBUF_TYPE_TIME_EXTEND);
46         trace_seq_printf(s, "\ttime_stamp : type == %d\n",
47                          RINGBUF_TYPE_TIME_STAMP);
48         trace_seq_printf(s, "\tdata max type_len  == %d\n",
49                          RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
50
51         return !trace_seq_has_overflowed(s);
52 }
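/*
 * Illustrative encoding (hypothetical values): with the layout printed
 * above, a data event with a 12-byte payload can describe its length in
 * type_len alone, since 12 == 3 * RB_ALIGNMENT, leaving array[0] free
 * for payload:
 *
 *   event->type_len   = 3;      (payload length / RB_ALIGNMENT)
 *   event->time_delta = delta;  (27-bit delta from the page time stamp)
 *
 * Payloads larger than RB_MAX_SMALL_DATA instead use type_len == 0 and
 * keep the length in array[0] (see rb_event_data_length() below).
 */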
53
54 /*
55  * The ring buffer is made up of a list of pages. A separate list of pages is
56  * allocated for each CPU. A writer may only write to a buffer that is
57  * associated with the CPU it is currently executing on.  A reader may read
58  * from any per cpu buffer.
59  *
60  * The reader is special. For each per cpu buffer, the reader has its own
61  * reader page. When a reader has read the entire reader page, this reader
62  * page is swapped with another page in the ring buffer.
63  *
64  * Now, as long as the writer is off the reader page, the reader can do
65  * whatever it wants with that page. The writer will never write to that page
66  * again (as long as it is out of the ring buffer).
67  *
68  * Here's some silly ASCII art.
69  *
70  *   +------+
71  *   |reader|          RING BUFFER
72  *   |page  |
73  *   +------+        +---+   +---+   +---+
74  *                   |   |-->|   |-->|   |
75  *                   +---+   +---+   +---+
76  *                     ^               |
77  *                     |               |
78  *                     +---------------+
79  *
80  *
81  *   +------+
82  *   |reader|          RING BUFFER
83  *   |page  |------------------v
84  *   +------+        +---+   +---+   +---+
85  *                   |   |-->|   |-->|   |
86  *                   +---+   +---+   +---+
87  *                     ^               |
88  *                     |               |
89  *                     +---------------+
90  *
91  *
92  *   +------+
93  *   |reader|          RING BUFFER
94  *   |page  |------------------v
95  *   +------+        +---+   +---+   +---+
96  *      ^            |   |-->|   |-->|   |
97  *      |            +---+   +---+   +---+
98  *      |                              |
99  *      |                              |
100  *      +------------------------------+
101  *
102  *
103  *   +------+
104  *   |buffer|          RING BUFFER
105  *   |page  |------------------v
106  *   +------+        +---+   +---+   +---+
107  *      ^            |   |   |   |-->|   |
108  *      |   New      +---+   +---+   +---+
109  *      |  Reader------^               |
110  *      |   page                       |
111  *      +------------------------------+
112  *
113  *
114  * After we make this swap, the reader can hand this page off to the splice
115  * code and be done with it. It can even allocate a new page if it needs to
116  * and swap that into the ring buffer.
117  *
118  * We will be using cmpxchg soon to make all this lockless.
119  *
120  */
121
122 /* Used for individual buffers (after the counter) */
123 #define RB_BUFFER_OFF           (1 << 20)
124
125 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
126
127 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
128 #define RB_ALIGNMENT            4U
129 #define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
130 #define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
131
132 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
133 # define RB_FORCE_8BYTE_ALIGNMENT       0
134 # define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
135 #else
136 # define RB_FORCE_8BYTE_ALIGNMENT       1
137 # define RB_ARCH_ALIGNMENT              8U
138 #endif
139
140 #define RB_ALIGN_DATA           __aligned(RB_ARCH_ALIGNMENT)
141
142 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
143 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
144
145 enum {
146         RB_LEN_TIME_EXTEND = 8,
147         RB_LEN_TIME_STAMP =  8,
148 };
149
150 #define skip_time_extend(event) \
151         ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
152
153 #define extended_time(event) \
154         (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
155
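/*
 * Illustration (hypothetical usage): a time-extend record is a fixed
 * 8-byte event placed directly in front of the data event whose delta
 * it extends, so
 *
 *   data_event = skip_time_extend(event);
 *
 * simply advances the pointer by RB_LEN_TIME_EXTEND.  extended_time()
 * is true for both TIME_EXTEND and TIME_STAMP records, because
 * RINGBUF_TYPE_TIME_STAMP immediately follows RINGBUF_TYPE_TIME_EXTEND
 * in the type enum.
 */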
156 static inline int rb_null_event(struct ring_buffer_event *event)
157 {
158         return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
159 }
160
161 static void rb_event_set_padding(struct ring_buffer_event *event)
162 {
163         /* padding has a NULL time_delta */
164         event->type_len = RINGBUF_TYPE_PADDING;
165         event->time_delta = 0;
166 }
167
168 static unsigned
169 rb_event_data_length(struct ring_buffer_event *event)
170 {
171         unsigned length;
172
173         if (event->type_len)
174                 length = event->type_len * RB_ALIGNMENT;
175         else
176                 length = event->array[0];
177         return length + RB_EVNT_HDR_SIZE;
178 }
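/*
 * Worked example (hypothetical values): for a data event with
 * type_len == 3, the payload is 3 * RB_ALIGNMENT == 12 bytes and
 * rb_event_data_length() returns 12 + RB_EVNT_HDR_SIZE (16 with the
 * usual 4-byte event header).  For a large event with type_len == 0
 * and array[0] == 200, it returns 200 + RB_EVNT_HDR_SIZE.
 */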
179
180 /*
181  * Return the length of the given event. Will return
182  * the length of the time extend if the event is a
183  * time extend.
184  */
185 static inline unsigned
186 rb_event_length(struct ring_buffer_event *event)
187 {
188         switch (event->type_len) {
189         case RINGBUF_TYPE_PADDING:
190                 if (rb_null_event(event))
191                         /* undefined */
192                         return -1;
193                 return  event->array[0] + RB_EVNT_HDR_SIZE;
194
195         case RINGBUF_TYPE_TIME_EXTEND:
196                 return RB_LEN_TIME_EXTEND;
197
198         case RINGBUF_TYPE_TIME_STAMP:
199                 return RB_LEN_TIME_STAMP;
200
201         case RINGBUF_TYPE_DATA:
202                 return rb_event_data_length(event);
203         default:
204                 BUG();
205         }
206         /* not hit */
207         return 0;
208 }
209
210 /*
211  * Return total length of time extend and data,
212  *   or just the event length for all other events.
213  */
214 static inline unsigned
215 rb_event_ts_length(struct ring_buffer_event *event)
216 {
217         unsigned len = 0;
218
219         if (extended_time(event)) {
220                 /* time extends include the data event after it */
221                 len = RB_LEN_TIME_EXTEND;
222                 event = skip_time_extend(event);
223         }
224         return len + rb_event_length(event);
225 }
226
227 /**
228  * ring_buffer_event_length - return the length of the event
229  * @event: the event to get the length of
230  *
231  * Returns the size of the data load of a data event.
232  * If the event is something other than a data event, it
233  * returns the size of the event itself. With the exception
234  * of a TIME EXTEND, where it still returns the size of the
235  * data load of the data event after it.
236  */
237 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
238 {
239         unsigned length;
240
241         if (extended_time(event))
242                 event = skip_time_extend(event);
243
244         length = rb_event_length(event);
245         if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
246                 return length;
247         length -= RB_EVNT_HDR_SIZE;
248         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
249                 length -= sizeof(event->array[0]);
250         return length;
251 }
252 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
253
254 /* inline for ring buffer fast paths */
255 static __always_inline void *
256 rb_event_data(struct ring_buffer_event *event)
257 {
258         if (extended_time(event))
259                 event = skip_time_extend(event);
260         BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
261         /* If length is in len field, then array[0] has the data */
262         if (event->type_len)
263                 return (void *)&event->array[0];
264         /* Otherwise length is in array[0] and array[1] has the data */
265         return (void *)&event->array[1];
266 }
267
268 /**
269  * ring_buffer_event_data - return the data of the event
270  * @event: the event to get the data from
271  */
272 void *ring_buffer_event_data(struct ring_buffer_event *event)
273 {
274         return rb_event_data(event);
275 }
276 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
277
278 #define for_each_buffer_cpu(buffer, cpu)                \
279         for_each_cpu(cpu, buffer->cpumask)
280
281 #define TS_SHIFT        27
282 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
283 #define TS_DELTA_TEST   (~TS_MASK)
284
285 /**
286  * ring_buffer_event_time_stamp - return the event's extended timestamp
287  * @event: the event to get the timestamp of
288  *
289  * Returns the extended timestamp associated with a data event.
290  * An extended time_stamp is a 64-bit timestamp represented
291  * internally in a special way that makes the best use of space
292  * contained within a ring buffer event.  This function decodes
293  * it and maps it to a straight u64 value.
294  */
295 u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
296 {
297         u64 ts;
298
299         ts = event->array[0];
300         ts <<= TS_SHIFT;
301         ts += event->time_delta;
302
303         return ts;
304 }
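/*
 * Worked example (hypothetical values): with array[0] == 2 and
 * time_delta == 5, the decoded timestamp is
 * (2 << TS_SHIFT) + 5 == (2 << 27) + 5 == 268435461.
 */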
305
306 /* Flag when events were overwritten */
307 #define RB_MISSED_EVENTS        (1 << 31)
308 /* Missed count stored at end */
309 #define RB_MISSED_STORED        (1 << 30)
310
311 #define RB_MISSED_FLAGS         (RB_MISSED_EVENTS|RB_MISSED_STORED)
312
313 struct buffer_data_page {
314         u64              time_stamp;    /* page time stamp */
315         local_t          commit;        /* write committed index */
316         unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
317 };
318
319 /*
320  * Note, the buffer_page list must be first. The buffer pages
321  * are allocated in cache lines, which means that each buffer
322  * page will be at the beginning of a cache line, and thus
323  * the least significant bits will be zero. We use this to
324  * add flags in the list struct pointers, to make the ring buffer
325  * lockless.
326  */
327 struct buffer_page {
328         struct list_head list;          /* list of buffer pages */
329         local_t          write;         /* index for next write */
330         unsigned         read;          /* index for next read */
331         local_t          entries;       /* entries on this page */
332         unsigned long    real_end;      /* real end of data */
333         struct buffer_data_page *page;  /* Actual data page */
334 };
335
336 /*
337  * The buffer page counters, write and entries, must be reset
338  * atomically when crossing page boundaries. To synchronize this
339  * update, two counters are packed into one value. One is
340  * the actual counter for the write position or count on the page.
341  *
342  * The other is a counter of updaters. Before an update happens,
343  * the updater portion of the counter is incremented. This
344  * allows the updater to update the counter atomically.
345  *
346  * The write counter is 20 bits, and the updater count uses the upper 12.
347  */
348 #define RB_WRITE_MASK           0xfffff
349 #define RB_WRITE_INTCNT         (1 << 20)
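/*
 * Illustration (hypothetical value): the low 20 bits of bpage->write
 * hold the write index, the bits above them count nested updaters.
 * If write == 0x300014:
 *
 *   write index   = 0x300014 & RB_WRITE_MASK == 0x14 (20 bytes)
 *   updater count = 0x300014 >> 20           == 3
 *
 * local_add_return(RB_WRITE_INTCNT, &bpage->write) bumps only the
 * updater count and leaves the write index untouched.
 */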
350
351 static void rb_init_page(struct buffer_data_page *bpage)
352 {
353         local_set(&bpage->commit, 0);
354 }
355
356 /**
357  * ring_buffer_page_len - the size of data on the page.
358  * @page: The page to read
359  *
360  * Returns the amount of data on the page, including buffer page header.
361  */
362 size_t ring_buffer_page_len(void *page)
363 {
364         struct buffer_data_page *bpage = page;
365
366         return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
367                 + BUF_PAGE_HDR_SIZE;
368 }
369
370 /*
371  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
372  * this issue out.
373  */
374 static void free_buffer_page(struct buffer_page *bpage)
375 {
376         free_page((unsigned long)bpage->page);
377         kfree(bpage);
378 }
379
380 /*
381  * We need to fit the time_stamp delta into 27 bits.
382  */
383 static inline int test_time_stamp(u64 delta)
384 {
385         if (delta & TS_DELTA_TEST)
386                 return 1;
387         return 0;
388 }
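/*
 * Example (hypothetical delta): TS_DELTA_TEST keeps only the bits above
 * the low 27, so any delta of 2^27 (134217728) or more makes
 * test_time_stamp() return non-zero and forces the writer to emit a
 * time-extend event.  With the default nanosecond trace clock that is
 * roughly 134 ms between events.
 */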
389
390 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
391
392 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
393 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
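/*
 * Illustrative sizes (assuming a 64-bit build with 4 KiB pages): the
 * buffer_data_page header is 16 bytes (u64 time_stamp + local_t commit),
 * so BUF_PAGE_SIZE == 4096 - 16 == 4080 and
 * BUF_MAX_DATA_SIZE == 4080 - 8 == 4072 bytes of event data per page.
 */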
394
395 int ring_buffer_print_page_header(struct trace_seq *s)
396 {
397         struct buffer_data_page field;
398
399         trace_seq_printf(s, "\tfield: u64 timestamp;\t"
400                          "offset:0;\tsize:%u;\tsigned:%u;\n",
401                          (unsigned int)sizeof(field.time_stamp),
402                          (unsigned int)is_signed_type(u64));
403
404         trace_seq_printf(s, "\tfield: local_t commit;\t"
405                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
406                          (unsigned int)offsetof(typeof(field), commit),
407                          (unsigned int)sizeof(field.commit),
408                          (unsigned int)is_signed_type(long));
409
410         trace_seq_printf(s, "\tfield: int overwrite;\t"
411                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
412                          (unsigned int)offsetof(typeof(field), commit),
413                          1,
414                          (unsigned int)is_signed_type(long));
415
416         trace_seq_printf(s, "\tfield: char data;\t"
417                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
418                          (unsigned int)offsetof(typeof(field), data),
419                          (unsigned int)BUF_PAGE_SIZE,
420                          (unsigned int)is_signed_type(char));
421
422         return !trace_seq_has_overflowed(s);
423 }
424
425 struct rb_irq_work {
426         struct irq_work                 work;
427         wait_queue_head_t               waiters;
428         wait_queue_head_t               full_waiters;
429         bool                            waiters_pending;
430         bool                            full_waiters_pending;
431         bool                            wakeup_full;
432 };
433
434 /*
435  * Structure to hold event state and handle nested events.
436  */
437 struct rb_event_info {
438         u64                     ts;
439         u64                     delta;
440         unsigned long           length;
441         struct buffer_page      *tail_page;
442         int                     add_timestamp;
443 };
444
445 /*
446  * Used for which event context the event is in.
447  *  NMI     = 0
448  *  IRQ     = 1
449  *  SOFTIRQ = 2
450  *  NORMAL  = 3
451  *
452  * See trace_recursive_lock() comment below for more details.
453  */
454 enum {
455         RB_CTX_NMI,
456         RB_CTX_IRQ,
457         RB_CTX_SOFTIRQ,
458         RB_CTX_NORMAL,
459         RB_CTX_MAX
460 };
461
462 /*
463  * head_page == tail_page && head == tail then buffer is empty.
464  */
465 struct ring_buffer_per_cpu {
466         int                             cpu;
467         atomic_t                        record_disabled;
468         struct ring_buffer              *buffer;
469         raw_spinlock_t                  reader_lock;    /* serialize readers */
470         arch_spinlock_t                 lock;
471         struct lock_class_key           lock_key;
472         struct buffer_data_page         *free_page;
473         unsigned long                   nr_pages;
474         unsigned int                    current_context;
475         struct list_head                *pages;
476         struct buffer_page              *head_page;     /* read from head */
477         struct buffer_page              *tail_page;     /* write to tail */
478         struct buffer_page              *commit_page;   /* committed pages */
479         struct buffer_page              *reader_page;
480         unsigned long                   lost_events;
481         unsigned long                   last_overrun;
482         unsigned long                   nest;
483         local_t                         entries_bytes;
484         local_t                         entries;
485         local_t                         overrun;
486         local_t                         commit_overrun;
487         local_t                         dropped_events;
488         local_t                         committing;
489         local_t                         commits;
490         local_t                         pages_touched;
491         local_t                         pages_read;
492         long                            last_pages_touch;
493         size_t                          shortest_full;
494         unsigned long                   read;
495         unsigned long                   read_bytes;
496         u64                             write_stamp;
497         u64                             read_stamp;
498         /* ring buffer pages to update, > 0 to add, < 0 to remove */
499         long                            nr_pages_to_update;
500         struct list_head                new_pages; /* new pages to add */
501         struct work_struct              update_pages_work;
502         struct completion               update_done;
503
504         struct rb_irq_work              irq_work;
505 };
506
507 struct ring_buffer {
508         unsigned                        flags;
509         int                             cpus;
510         atomic_t                        record_disabled;
511         atomic_t                        resize_disabled;
512         cpumask_var_t                   cpumask;
513
514         struct lock_class_key           *reader_lock_key;
515
516         struct mutex                    mutex;
517
518         struct ring_buffer_per_cpu      **buffers;
519
520         struct hlist_node               node;
521         u64                             (*clock)(void);
522
523         struct rb_irq_work              irq_work;
524         bool                            time_stamp_abs;
525 };
526
527 struct ring_buffer_iter {
528         struct ring_buffer_per_cpu      *cpu_buffer;
529         unsigned long                   head;
530         struct buffer_page              *head_page;
531         struct buffer_page              *cache_reader_page;
532         unsigned long                   cache_read;
533         u64                             read_stamp;
534 };
535
536 /**
537  * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
538  * @buffer: The ring_buffer to get the number of pages from
539  * @cpu: The cpu of the ring_buffer to get the number of pages from
540  *
541  * Returns the number of pages used by a per_cpu buffer of the ring buffer.
542  */
543 size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
544 {
545         return buffer->buffers[cpu]->nr_pages;
546 }
547
548 /**
549  * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
550  * @buffer: The ring_buffer to get the number of pages from
551  * @cpu: The cpu of the ring_buffer to get the number of pages from
552  *
553  * Returns the number of pages that have content in the ring buffer.
554  */
555 size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
556 {
557         size_t read;
558         size_t cnt;
559
560         read = local_read(&buffer->buffers[cpu]->pages_read);
561         cnt = local_read(&buffer->buffers[cpu]->pages_touched);
562         /* The reader can read an empty page, but not more than that */
563         if (cnt < read) {
564                 WARN_ON_ONCE(read > cnt + 1);
565                 return 0;
566         }
567
568         return cnt - read;
569 }
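/*
 * Example (hypothetical counts): if a CPU buffer has touched 5 pages
 * and the reader has consumed 2 of them, ring_buffer_nr_dirty_pages()
 * returns 5 - 2 == 3 pages that still hold unread data.
 */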
570
571 /*
572  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
573  *
574  * irq_work handler that wakes up any task that is blocked on the
575  * ring buffer waiters queue.
576  */
577 static void rb_wake_up_waiters(struct irq_work *work)
578 {
579         struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
580
581         wake_up_all(&rbwork->waiters);
582         if (rbwork->wakeup_full) {
583                 rbwork->wakeup_full = false;
584                 wake_up_all(&rbwork->full_waiters);
585         }
586 }
587
588 /**
589  * ring_buffer_wait - wait for input to the ring buffer
590  * @buffer: buffer to wait on
591  * @cpu: the cpu buffer to wait on
592  * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
593  *
594  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
595  * as data is added to any of the @buffer's cpu buffers. Otherwise
596  * it will wait for data to be added to a specific cpu buffer.
597  */
598 int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
599 {
600         struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
601         DEFINE_WAIT(wait);
602         struct rb_irq_work *work;
603         int ret = 0;
604
605         /*
606          * Depending on what the caller is waiting for (either any
607          * data in any cpu buffer, or data in a specific buffer), put
608          * the caller on the appropriate wait queue.
609          */
610         if (cpu == RING_BUFFER_ALL_CPUS) {
611                 work = &buffer->irq_work;
612                 /* Full only makes sense on per cpu reads */
613                 full = 0;
614         } else {
615                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
616                         return -ENODEV;
617                 cpu_buffer = buffer->buffers[cpu];
618                 work = &cpu_buffer->irq_work;
619         }
620
621
622         while (true) {
623                 if (full)
624                         prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
625                 else
626                         prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
627
628                 /*
629                  * The events can happen in critical sections where
630                  * checking a work queue can cause deadlocks.
631                  * After adding a task to the queue, this flag is set
632                  * only to notify events to try to wake up the queue
633                  * using irq_work.
634                  *
635                  * We don't clear it even if the buffer is no longer
636                  * empty. The flag only causes the next event to run
637                  * irq_work to do the work queue wake up. The worst
638                  * that can happen if we race with !trace_empty() is that
639                  * an event will cause an irq_work to try to wake up
640                  * an empty queue.
641                  *
642                  * There's no reason to protect this flag either, as
643                  * the work queue and irq_work logic will do the necessary
644                  * synchronization for the wake ups. The only thing
645                  * that is necessary is that the wake up happens after
646                  * a task has been queued. It's OK for spurious wake ups.
647                  */
648                 if (full)
649                         work->full_waiters_pending = true;
650                 else
651                         work->waiters_pending = true;
652
653                 if (signal_pending(current)) {
654                         ret = -EINTR;
655                         break;
656                 }
657
658                 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
659                         break;
660
661                 if (cpu != RING_BUFFER_ALL_CPUS &&
662                     !ring_buffer_empty_cpu(buffer, cpu)) {
663                         unsigned long flags;
664                         bool pagebusy;
665                         size_t nr_pages;
666                         size_t dirty;
667
668                         if (!full)
669                                 break;
670
671                         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
672                         pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
673                         nr_pages = cpu_buffer->nr_pages;
674                         dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
675                         if (!cpu_buffer->shortest_full ||
676                             cpu_buffer->shortest_full < full)
677                                 cpu_buffer->shortest_full = full;
678                         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
679                         if (!pagebusy &&
680                             (!nr_pages || (dirty * 100) > full * nr_pages))
681                                 break;
682                 }
683
684                 schedule();
685         }
686
687         if (full)
688                 finish_wait(&work->full_waiters, &wait);
689         else
690                 finish_wait(&work->waiters, &wait);
691
692         return ret;
693 }
694
695 /**
696  * ring_buffer_poll_wait - poll on buffer input
697  * @buffer: buffer to wait on
698  * @cpu: the cpu buffer to wait on
699  * @filp: the file descriptor
700  * @poll_table: The poll descriptor
701  *
702  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
703  * as data is added to any of the @buffer's cpu buffers. Otherwise
704  * it will wait for data to be added to a specific cpu buffer.
705  *
706  * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
707  * zero otherwise.
708  */
709 __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
710                           struct file *filp, poll_table *poll_table)
711 {
712         struct ring_buffer_per_cpu *cpu_buffer;
713         struct rb_irq_work *work;
714
715         if (cpu == RING_BUFFER_ALL_CPUS)
716                 work = &buffer->irq_work;
717         else {
718                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
719                         return -EINVAL;
720
721                 cpu_buffer = buffer->buffers[cpu];
722                 work = &cpu_buffer->irq_work;
723         }
724
725         poll_wait(filp, &work->waiters, poll_table);
726         work->waiters_pending = true;
727         /*
728          * There's a tight race between setting the waiters_pending and
729          * checking if the ring buffer is empty.  Once the waiters_pending bit
730          * is set, the next event will wake the task up, but we can get stuck
731          * if there's only a single event in.
732          *
733          * FIXME: Ideally, we need a memory barrier on the writer side as well,
734          * but adding a memory barrier to all events will cause too much of a
735          * performance hit in the fast path.  We only need a memory barrier when
736          * the buffer goes from empty to having content.  But as this race is
737          * extremely small, and it's not a problem if another event comes in, we
738          * will fix it later.
739          */
740         smp_mb();
741
742         if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
743             (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
744                 return EPOLLIN | EPOLLRDNORM;
745         return 0;
746 }
747
748 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
749 #define RB_WARN_ON(b, cond)                                             \
750         ({                                                              \
751                 int _____ret = unlikely(cond);                          \
752                 if (_____ret) {                                         \
753                         if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
754                                 struct ring_buffer_per_cpu *__b =       \
755                                         (void *)b;                      \
756                                 atomic_inc(&__b->buffer->record_disabled); \
757                         } else                                          \
758                                 atomic_inc(&b->record_disabled);        \
759                         WARN_ON(1);                                     \
760                 }                                                       \
761                 _____ret;                                               \
762         })
763
764 /* Up this if you want to test the TIME_EXTENTS and normalization */
765 #define DEBUG_SHIFT 0
766
767 static inline u64 rb_time_stamp(struct ring_buffer *buffer)
768 {
769         /* shift to debug/test normalization and TIME_EXTENTS */
770         return buffer->clock() << DEBUG_SHIFT;
771 }
772
773 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
774 {
775         u64 time;
776
777         preempt_disable_notrace();
778         time = rb_time_stamp(buffer);
779         preempt_enable_no_resched_notrace();
780
781         return time;
782 }
783 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
784
785 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
786                                       int cpu, u64 *ts)
787 {
788         /* Just stupid testing the normalize function and deltas */
789         *ts >>= DEBUG_SHIFT;
790 }
791 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
792
793 /*
794  * Making the ring buffer lockless makes things tricky.
795  * Writes only happen on the CPU that they are on, so they
796  * only need to worry about interrupts. Reads, however, can
797  * happen on any CPU.
798  *
799  * The reader page is always off the ring buffer, but when the
800  * reader finishes with a page, it needs to swap its page with
801  * a new one from the buffer. The reader needs to take from
802  * the head (writes go to the tail). But if a writer is in overwrite
803  * mode and wraps, it must push the head page forward.
804  *
805  * Here lies the problem.
806  *
807  * The reader must be careful to replace only the head page, and
808  * not another one. As described at the top of the file in the
809  * ASCII art, the reader sets its old page to point to the next
810  * page after head. It then sets the page after head to point to
811  * the old reader page. But if the writer moves the head page
812  * during this operation, the reader could end up with the tail.
813  *
814  * We use cmpxchg to help prevent this race. We also do something
815  * special with the page before head. We set the LSB to 1.
816  *
817  * When the writer must push the page forward, it will clear the
818  * bit that points to the head page, move the head, and then set
819  * the bit that points to the new head page.
820  *
821  * We also don't want an interrupt coming in and moving the head
822  * page on another writer. Thus we use the second LSB to catch
823  * that too. Thus:
824  *
825  * head->list->prev->next        bit 1          bit 0
826  *                              -------        -------
827  * Normal page                     0              0
828  * Points to head page             0              1
829  * New head page                   1              0
830  *
831  * Note we can not trust the prev pointer of the head page, because:
832  *
833  * +----+       +-----+        +-----+
834  * |    |------>|  T  |---X--->|  N  |
835  * |    |<------|     |        |     |
836  * +----+       +-----+        +-----+
837  *   ^                           ^ |
838  *   |          +-----+          | |
839  *   +----------|  R  |----------+ |
840  *              |     |<-----------+
841  *              +-----+
842  *
843  * Key:  ---X-->  HEAD flag set in pointer
844  *         T      Tail page
845  *         R      Reader page
846  *         N      Next page
847  *
848  * (see __rb_reserve_next() to see where this happens)
849  *
850  *  What the above shows is that the reader just swapped out
851  *  the reader page with a page in the buffer, but before it
852  *  could make the new head page point back to the newly added page,
853  *  it was preempted by a writer. The writer moved forward onto
854  *  the new page added by the reader and is about to move forward
855  *  again.
856  *
857  *  You can see, it is legitimate for the previous pointer of
858  *  the head (or any page) not to point back to itself. But only
859  *  temporarily.
860  */
861
862 #define RB_PAGE_NORMAL          0UL
863 #define RB_PAGE_HEAD            1UL
864 #define RB_PAGE_UPDATE          2UL
865
866
867 #define RB_FLAG_MASK            3UL
868
869 /* PAGE_MOVED is not part of the mask */
870 #define RB_PAGE_MOVED           4UL
871
872 /*
873  * rb_list_head - remove any bit
874  */
875 static struct list_head *rb_list_head(struct list_head *list)
876 {
877         unsigned long val = (unsigned long)list;
878
879         return (struct list_head *)(val & ~RB_FLAG_MASK);
880 }
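/*
 * Illustration (hypothetical pointer value): because buffer pages are
 * cache-line aligned, the two low bits of a ->next pointer are free for
 * flags.  If a ->next value reads 0xffff888012345601, the low bits 01
 * mean "points to the head page" (RB_PAGE_HEAD), and rb_list_head()
 * strips them, yielding 0xffff888012345600.
 */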
881
882 /*
883  * rb_is_head_page - test if the given page is the head page
884  *
885  * Because the reader may move the head_page pointer, we can
886  * not trust what the head page is (it may be pointing to
887  * the reader page). But if the next page is a header page,
888  * its flags will be non-zero.
889  */
890 static inline int
891 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
892                 struct buffer_page *page, struct list_head *list)
893 {
894         unsigned long val;
895
896         val = (unsigned long)list->next;
897
898         if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
899                 return RB_PAGE_MOVED;
900
901         return val & RB_FLAG_MASK;
902 }
903
904 /*
905  * rb_is_reader_page
906  *
907  * The unique thing about the reader page is that, if the
908  * writer is ever on it, the previous pointer never points
909  * back to the reader page.
910  */
911 static bool rb_is_reader_page(struct buffer_page *page)
912 {
913         struct list_head *list = page->list.prev;
914
915         return rb_list_head(list->next) != &page->list;
916 }
917
918 /*
919  * rb_set_list_to_head - set a list_head to be pointing to head.
920  */
921 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
922                                 struct list_head *list)
923 {
924         unsigned long *ptr;
925
926         ptr = (unsigned long *)&list->next;
927         *ptr |= RB_PAGE_HEAD;
928         *ptr &= ~RB_PAGE_UPDATE;
929 }
930
931 /*
932  * rb_head_page_activate - sets up head page
933  */
934 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
935 {
936         struct buffer_page *head;
937
938         head = cpu_buffer->head_page;
939         if (!head)
940                 return;
941
942         /*
943          * Set the previous list pointer to have the HEAD flag.
944          */
945         rb_set_list_to_head(cpu_buffer, head->list.prev);
946 }
947
948 static void rb_list_head_clear(struct list_head *list)
949 {
950         unsigned long *ptr = (unsigned long *)&list->next;
951
952         *ptr &= ~RB_FLAG_MASK;
953 }
954
955 /*
956  * rb_head_page_deactivate - clears head page ptr (for free list)
957  */
958 static void
959 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
960 {
961         struct list_head *hd;
962
963         /* Go through the whole list and clear any pointers found. */
964         rb_list_head_clear(cpu_buffer->pages);
965
966         list_for_each(hd, cpu_buffer->pages)
967                 rb_list_head_clear(hd);
968 }
969
970 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
971                             struct buffer_page *head,
972                             struct buffer_page *prev,
973                             int old_flag, int new_flag)
974 {
975         struct list_head *list;
976         unsigned long val = (unsigned long)&head->list;
977         unsigned long ret;
978
979         list = &prev->list;
980
981         val &= ~RB_FLAG_MASK;
982
983         ret = cmpxchg((unsigned long *)&list->next,
984                       val | old_flag, val | new_flag);
985
986         /* check if the reader took the page */
987         if ((ret & ~RB_FLAG_MASK) != val)
988                 return RB_PAGE_MOVED;
989
990         return ret & RB_FLAG_MASK;
991 }
992
993 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
994                                    struct buffer_page *head,
995                                    struct buffer_page *prev,
996                                    int old_flag)
997 {
998         return rb_head_page_set(cpu_buffer, head, prev,
999                                 old_flag, RB_PAGE_UPDATE);
1000 }
1001
1002 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1003                                  struct buffer_page *head,
1004                                  struct buffer_page *prev,
1005                                  int old_flag)
1006 {
1007         return rb_head_page_set(cpu_buffer, head, prev,
1008                                 old_flag, RB_PAGE_HEAD);
1009 }
1010
1011 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1012                                    struct buffer_page *head,
1013                                    struct buffer_page *prev,
1014                                    int old_flag)
1015 {
1016         return rb_head_page_set(cpu_buffer, head, prev,
1017                                 old_flag, RB_PAGE_NORMAL);
1018 }
1019
1020 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
1021                                struct buffer_page **bpage)
1022 {
1023         struct list_head *p = rb_list_head((*bpage)->list.next);
1024
1025         *bpage = list_entry(p, struct buffer_page, list);
1026 }
1027
1028 static struct buffer_page *
1029 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1030 {
1031         struct buffer_page *head;
1032         struct buffer_page *page;
1033         struct list_head *list;
1034         int i;
1035
1036         if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1037                 return NULL;
1038
1039         /* sanity check */
1040         list = cpu_buffer->pages;
1041         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1042                 return NULL;
1043
1044         page = head = cpu_buffer->head_page;
1045         /*
1046          * It is possible that the writer moves the head page behind
1047          * where we started, and we miss it in one loop.
1048          * A second loop should grab the head page, but we'll do
1049          * three loops just because I'm paranoid.
1050          */
1051         for (i = 0; i < 3; i++) {
1052                 do {
1053                         if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
1054                                 cpu_buffer->head_page = page;
1055                                 return page;
1056                         }
1057                         rb_inc_page(cpu_buffer, &page);
1058                 } while (page != head);
1059         }
1060
1061         RB_WARN_ON(cpu_buffer, 1);
1062
1063         return NULL;
1064 }
1065
1066 static int rb_head_page_replace(struct buffer_page *old,
1067                                 struct buffer_page *new)
1068 {
1069         unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1070         unsigned long val;
1071         unsigned long ret;
1072
1073         val = *ptr & ~RB_FLAG_MASK;
1074         val |= RB_PAGE_HEAD;
1075
1076         ret = cmpxchg(ptr, val, (unsigned long)&new->list);
1077
1078         return ret == val;
1079 }
1080
1081 /*
1082  * rb_tail_page_update - move the tail page forward
1083  */
1084 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1085                                struct buffer_page *tail_page,
1086                                struct buffer_page *next_page)
1087 {
1088         unsigned long old_entries;
1089         unsigned long old_write;
1090
1091         /*
1092          * The tail page now needs to be moved forward.
1093          *
1094          * We need to reset the tail page, but without possibly
1095          * erasing data brought in by interrupts that have moved
1096          * the tail page and are currently on it.
1097          *
1098          * We add a counter to the write field to denote this.
1099          */
1100         old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1101         old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1102
1103         local_inc(&cpu_buffer->pages_touched);
1104         /*
1105          * Just make sure we have seen our old_write and synchronize
1106          * with any interrupts that come in.
1107          */
1108         barrier();
1109
1110         /*
1111          * If the tail page is still the same as what we think
1112          * it is, then it is up to us to update the tail
1113          * pointer.
1114          */
1115         if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1116                 /* Zero the write counter */
1117                 unsigned long val = old_write & ~RB_WRITE_MASK;
1118                 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1119
1120                 /*
1121                  * This will only succeed if an interrupt did
1122                  * not come in and change it, in which case we
1123                  * do not want to modify it.
1124                  *
1125                  * We add (void) to let the compiler know that we do not care
1126                  * about the return value of these functions. We use the
1127                  * cmpxchg to only update if an interrupt did not already
1128                  * do it for us. If the cmpxchg fails, we don't care.
1129                  */
1130                 (void)local_cmpxchg(&next_page->write, old_write, val);
1131                 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1132
1133                 /*
1134                  * No need to worry about races with clearing out the commit.
1135                  * It can only increment when a commit takes place. But that
1136                  * only happens in the outermost nested commit.
1137                  */
1138                 local_set(&next_page->page->commit, 0);
1139
1140                 /* Again, either we update tail_page or an interrupt does */
1141                 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1142         }
1143 }
1144
1145 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1146                           struct buffer_page *bpage)
1147 {
1148         unsigned long val = (unsigned long)bpage;
1149
1150         if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1151                 return 1;
1152
1153         return 0;
1154 }
1155
1156 /**
1157  * rb_check_list - make sure a pointer to a list has the last bits zero
1158  */
1159 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1160                          struct list_head *list)
1161 {
1162         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1163                 return 1;
1164         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1165                 return 1;
1166         return 0;
1167 }
1168
1169 /**
1170  * rb_check_pages - integrity check of buffer pages
1171  * @cpu_buffer: CPU buffer with pages to test
1172  *
1173  * As a safety measure we check to make sure the data pages have not
1174  * been corrupted.
1175  */
1176 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1177 {
1178         struct list_head *head = cpu_buffer->pages;
1179         struct buffer_page *bpage, *tmp;
1180
1181         /* Reset the head page if it exists */
1182         if (cpu_buffer->head_page)
1183                 rb_set_head_page(cpu_buffer);
1184
1185         rb_head_page_deactivate(cpu_buffer);
1186
1187         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1188                 return -1;
1189         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1190                 return -1;
1191
1192         if (rb_check_list(cpu_buffer, head))
1193                 return -1;
1194
1195         list_for_each_entry_safe(bpage, tmp, head, list) {
1196                 if (RB_WARN_ON(cpu_buffer,
1197                                bpage->list.next->prev != &bpage->list))
1198                         return -1;
1199                 if (RB_WARN_ON(cpu_buffer,
1200                                bpage->list.prev->next != &bpage->list))
1201                         return -1;
1202                 if (rb_check_list(cpu_buffer, &bpage->list))
1203                         return -1;
1204         }
1205
1206         rb_head_page_activate(cpu_buffer);
1207
1208         return 0;
1209 }
1210
1211 static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
1212 {
1213         struct buffer_page *bpage, *tmp;
1214         bool user_thread = current->mm != NULL;
1215         gfp_t mflags;
1216         long i;
1217
1218         /*
1219          * Check whether enough memory is available first.
1220          * Note, si_mem_available() only gives us a rough estimate of available
1221          * memory. It may not be accurate. But we don't care, we just want
1222          * to prevent doing any allocation when it is obvious that it is
1223          * not going to succeed.
1224          */
1225         i = si_mem_available();
1226         if (i < nr_pages)
1227                 return -ENOMEM;
1228
1229         /*
1230          * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1231          * gracefully without invoking oom-killer and the system is not
1232          * destabilized.
1233          */
1234         mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1235
1236         /*
1237          * If a user thread allocates too much and si_mem_available()
1238          * reports there's enough memory even though there is not,
1239          * make sure the OOM killer kills this thread. This can happen
1240          * even with RETRY_MAYFAIL because another task may be doing
1241          * an allocation after this task has taken all memory.
1242          * This is the task the OOM killer needs to take out during this
1243          * loop, even if it was triggered by an allocation somewhere else.
1244          */
1245         if (user_thread)
1246                 set_current_oom_origin();
1247         for (i = 0; i < nr_pages; i++) {
1248                 struct page *page;
1249
1250                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1251                                     mflags, cpu_to_node(cpu));
1252                 if (!bpage)
1253                         goto free_pages;
1254
1255                 list_add(&bpage->list, pages);
1256
1257                 page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
1258                 if (!page)
1259                         goto free_pages;
1260                 bpage->page = page_address(page);
1261                 rb_init_page(bpage->page);
1262
1263                 if (user_thread && fatal_signal_pending(current))
1264                         goto free_pages;
1265         }
1266         if (user_thread)
1267                 clear_current_oom_origin();
1268
1269         return 0;
1270
1271 free_pages:
1272         list_for_each_entry_safe(bpage, tmp, pages, list) {
1273                 list_del_init(&bpage->list);
1274                 free_buffer_page(bpage);
1275         }
1276         if (user_thread)
1277                 clear_current_oom_origin();
1278
1279         return -ENOMEM;
1280 }
1281
1282 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1283                              unsigned long nr_pages)
1284 {
1285         LIST_HEAD(pages);
1286
1287         WARN_ON(!nr_pages);
1288
1289         if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1290                 return -ENOMEM;
1291
1292         /*
1293          * The ring buffer page list is a circular list that does not
1294          * start and end with a list head. All page list items point to
1295          * other pages.
1296          */
1297         cpu_buffer->pages = pages.next;
1298         list_del(&pages);
1299
1300         cpu_buffer->nr_pages = nr_pages;
1301
1302         rb_check_pages(cpu_buffer);
1303
1304         return 0;
1305 }
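/*
 * Illustrative layout (hypothetical pages A, B, C): after
 * rb_allocate_pages() the temporary list head is removed, leaving the
 * pages linked circularly among themselves:
 *
 *   A.next -> B, B.next -> C, C.next -> A
 *
 * which is why code that walks the page list follows ->next until it
 * returns to the starting page instead of stopping at a list_head
 * sentinel.
 */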
1306
1307 static struct ring_buffer_per_cpu *
1308 rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
1309 {
1310         struct ring_buffer_per_cpu *cpu_buffer;
1311         struct buffer_page *bpage;
1312         struct page *page;
1313         int ret;
1314
1315         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1316                                   GFP_KERNEL, cpu_to_node(cpu));
1317         if (!cpu_buffer)
1318                 return NULL;
1319
1320         cpu_buffer->cpu = cpu;
1321         cpu_buffer->buffer = buffer;
1322         raw_spin_lock_init(&cpu_buffer->reader_lock);
1323         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1324         cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1325         INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1326         init_completion(&cpu_buffer->update_done);
1327         init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1328         init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1329         init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1330
1331         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1332                             GFP_KERNEL, cpu_to_node(cpu));
1333         if (!bpage)
1334                 goto fail_free_buffer;
1335
1336         rb_check_bpage(cpu_buffer, bpage);
1337
1338         cpu_buffer->reader_page = bpage;
1339         page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1340         if (!page)
1341                 goto fail_free_reader;
1342         bpage->page = page_address(page);
1343         rb_init_page(bpage->page);
1344
1345         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1346         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1347
1348         ret = rb_allocate_pages(cpu_buffer, nr_pages);
1349         if (ret < 0)
1350                 goto fail_free_reader;
1351
1352         cpu_buffer->head_page
1353                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1354         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1355
1356         rb_head_page_activate(cpu_buffer);
1357
1358         return cpu_buffer;
1359
1360  fail_free_reader:
1361         free_buffer_page(cpu_buffer->reader_page);
1362
1363  fail_free_buffer:
1364         kfree(cpu_buffer);
1365         return NULL;
1366 }
1367
1368 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1369 {
1370         struct list_head *head = cpu_buffer->pages;
1371         struct buffer_page *bpage, *tmp;
1372
1373         free_buffer_page(cpu_buffer->reader_page);
1374
1375         rb_head_page_deactivate(cpu_buffer);
1376
1377         if (head) {
1378                 list_for_each_entry_safe(bpage, tmp, head, list) {
1379                         list_del_init(&bpage->list);
1380                         free_buffer_page(bpage);
1381                 }
1382                 bpage = list_entry(head, struct buffer_page, list);
1383                 free_buffer_page(bpage);
1384         }
1385
1386         kfree(cpu_buffer);
1387 }
1388
1389 /**
1390  * __ring_buffer_alloc - allocate a new ring_buffer
1391  * @size: the size in bytes per cpu that is needed.
1392  * @flags: attributes to set for the ring buffer.
1393  *
1394  * Currently the only flag that is available is the RB_FL_OVERWRITE
1395  * flag. This flag means that the buffer will overwrite old data
1396  * when the buffer wraps. If this flag is not set, the buffer will
1397  * drop data when the tail hits the head.
1398  */
1399 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1400                                         struct lock_class_key *key)
1401 {
1402         struct ring_buffer *buffer;
1403         long nr_pages;
1404         int bsize;
1405         int cpu;
1406         int ret;
1407
1408         /* keep it in its own cache line */
1409         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1410                          GFP_KERNEL);
1411         if (!buffer)
1412                 return NULL;
1413
1414         if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1415                 goto fail_free_buffer;
1416
1417         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1418         buffer->flags = flags;
1419         buffer->clock = trace_clock_local;
1420         buffer->reader_lock_key = key;
1421
1422         init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1423         init_waitqueue_head(&buffer->irq_work.waiters);
1424
1425         /* need at least two pages */
1426         if (nr_pages < 2)
1427                 nr_pages = 2;
1428
1429         buffer->cpus = nr_cpu_ids;
1430
1431         bsize = sizeof(void *) * nr_cpu_ids;
1432         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1433                                   GFP_KERNEL);
1434         if (!buffer->buffers)
1435                 goto fail_free_cpumask;
1436
1437         cpu = raw_smp_processor_id();
1438         cpumask_set_cpu(cpu, buffer->cpumask);
1439         buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1440         if (!buffer->buffers[cpu])
1441                 goto fail_free_buffers;
1442
1443         ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1444         if (ret < 0)
1445                 goto fail_free_buffers;
1446
1447         mutex_init(&buffer->mutex);
1448
1449         return buffer;
1450
1451  fail_free_buffers:
1452         for_each_buffer_cpu(buffer, cpu) {
1453                 if (buffer->buffers[cpu])
1454                         rb_free_cpu_buffer(buffer->buffers[cpu]);
1455         }
1456         kfree(buffer->buffers);
1457
1458  fail_free_cpumask:
1459         free_cpumask_var(buffer->cpumask);
1460
1461  fail_free_buffer:
1462         kfree(buffer);
1463         return NULL;
1464 }
1465 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1466
1467 /**
1468  * ring_buffer_free - free a ring buffer.
1469  * @buffer: the buffer to free.
1470  */
1471 void
1472 ring_buffer_free(struct ring_buffer *buffer)
1473 {
1474         int cpu;
1475
1476         cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1477
1478         for_each_buffer_cpu(buffer, cpu)
1479                 rb_free_cpu_buffer(buffer->buffers[cpu]);
1480
1481         kfree(buffer->buffers);
1482         free_cpumask_var(buffer->cpumask);
1483
1484         kfree(buffer);
1485 }
1486 EXPORT_SYMBOL_GPL(ring_buffer_free);
1487
1488 void ring_buffer_set_clock(struct ring_buffer *buffer,
1489                            u64 (*clock)(void))
1490 {
1491         buffer->clock = clock;
1492 }
1493
1494 void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
1495 {
1496         buffer->time_stamp_abs = abs;
1497 }
1498
1499 bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
1500 {
1501         return buffer->time_stamp_abs;
1502 }
1503
1504 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1505
1506 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1507 {
1508         return local_read(&bpage->entries) & RB_WRITE_MASK;
1509 }
1510
1511 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1512 {
1513         return local_read(&bpage->write) & RB_WRITE_MASK;
1514 }
1515
1516 static int
1517 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1518 {
1519         struct list_head *tail_page, *to_remove, *next_page;
1520         struct buffer_page *to_remove_page, *tmp_iter_page;
1521         struct buffer_page *last_page, *first_page;
1522         unsigned long nr_removed;
1523         unsigned long head_bit;
1524         int page_entries;
1525
1526         head_bit = 0;
1527
1528         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1529         atomic_inc(&cpu_buffer->record_disabled);
1530         /*
1531          * We don't race with the readers since we have acquired the reader
1532          * lock. We also don't race with writers after disabling recording.
1533          * This makes it easy to figure out the first and the last page to be
1534          * removed from the list. We unlink all the pages in between including
1535          * the first and last pages. This is done in a busy loop so that we
1536          * lose the least number of traces.
1537          * The pages are freed after we restart recording and unlock readers.
1538          */
1539         tail_page = &cpu_buffer->tail_page->list;
1540
1541         /*
1542          * The tail page might be on the reader page; in that case we
1543          * remove the next page from the ring buffer
1544          */
1545         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1546                 tail_page = rb_list_head(tail_page->next);
1547         to_remove = tail_page;
1548
1549         /* start of pages to remove */
1550         first_page = list_entry(rb_list_head(to_remove->next),
1551                                 struct buffer_page, list);
1552
1553         for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1554                 to_remove = rb_list_head(to_remove)->next;
1555                 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1556         }
1557
1558         next_page = rb_list_head(to_remove)->next;
1559
1560         /*
1561          * Now we remove all pages between tail_page and next_page.
1562          * Make sure that we have head_bit value preserved for the
1563          * next page
1564          */
1565         tail_page->next = (struct list_head *)((unsigned long)next_page |
1566                                                 head_bit);
1567         next_page = rb_list_head(next_page);
1568         next_page->prev = tail_page;
1569
1570         /* make sure pages points to a valid page in the ring buffer */
1571         cpu_buffer->pages = next_page;
1572
1573         /* update head page */
1574         if (head_bit)
1575                 cpu_buffer->head_page = list_entry(next_page,
1576                                                 struct buffer_page, list);
1577
1578         /*
1579          * change read pointer to make sure any read iterators reset
1580          * themselves
1581          */
1582         cpu_buffer->read = 0;
1583
1584         /* pages are removed, resume tracing and then free the pages */
1585         atomic_dec(&cpu_buffer->record_disabled);
1586         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1587
1588         RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1589
1590         /* last buffer page to remove */
1591         last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1592                                 list);
1593         tmp_iter_page = first_page;
1594
1595         do {
1596                 cond_resched();
1597
1598                 to_remove_page = tmp_iter_page;
1599                 rb_inc_page(cpu_buffer, &tmp_iter_page);
1600
1601                 /* update the counters */
1602                 page_entries = rb_page_entries(to_remove_page);
1603                 if (page_entries) {
1604                         /*
1605                          * If something was added to this page, it was full
1606                          * since it is not the tail page. So we deduct the
1607                          * bytes consumed in ring buffer from here.
1608                          * Increment overrun to account for the lost events.
1609                          */
1610                         local_add(page_entries, &cpu_buffer->overrun);
1611                         local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1612                 }
1613
1614                 /*
1615                  * We have already removed references to this list item, just
1616                  * free up the buffer_page and its page
1617                  */
1618                 free_buffer_page(to_remove_page);
1619                 nr_removed--;
1620
1621         } while (to_remove_page != last_page);
1622
1623         RB_WARN_ON(cpu_buffer, nr_removed);
1624
1625         return nr_removed == 0;
1626 }
1627
1628 static int
1629 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1630 {
1631         struct list_head *pages = &cpu_buffer->new_pages;
1632         int retries, success;
1633
1634         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1635         /*
1636          * We are holding the reader lock, so the reader page won't be swapped
1637          * in the ring buffer. Now we are racing with the writer trying to
1638          * move head page and the tail page.
1639          * We are going to adapt the reader page update process where:
1640          * 1. We first splice the start and end of list of new pages between
1641          *    the head page and its previous page.
1642          * 2. We cmpxchg the prev_page->next to point from head page to the
1643          *    start of new pages list.
1644          * 3. Finally, we update the head->prev to the end of new list.
1645          *
1646          * We will try this process 10 times, to make sure that we don't keep
1647          * spinning.
1648          */
1649         retries = 10;
1650         success = 0;
1651         while (retries--) {
1652                 struct list_head *head_page, *prev_page, *r;
1653                 struct list_head *last_page, *first_page;
1654                 struct list_head *head_page_with_bit;
1655
1656                 head_page = &rb_set_head_page(cpu_buffer)->list;
1657                 if (!head_page)
1658                         break;
1659                 prev_page = head_page->prev;
1660
1661                 first_page = pages->next;
1662                 last_page  = pages->prev;
1663
1664                 head_page_with_bit = (struct list_head *)
1665                                      ((unsigned long)head_page | RB_PAGE_HEAD);
1666
1667                 last_page->next = head_page_with_bit;
1668                 first_page->prev = prev_page;
1669
1670                 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1671
1672                 if (r == head_page_with_bit) {
1673                         /*
1674                          * yay, we replaced the page pointer to our new list,
1675                          * now, we just have to update the head page's prev
1676                          * pointer to point to end of list
1677                          */
1678                         head_page->prev = last_page;
1679                         success = 1;
1680                         break;
1681                 }
1682         }
1683
1684         if (success)
1685                 INIT_LIST_HEAD(pages);
1686         /*
1687          * If we weren't successful in adding in new pages, warn and stop
1688          * tracing
1689          */
1690         RB_WARN_ON(cpu_buffer, !success);
1691         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1692
1693         /* free pages if they weren't inserted */
1694         if (!success) {
1695                 struct buffer_page *bpage, *tmp;
1696                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1697                                          list) {
1698                         list_del_init(&bpage->list);
1699                         free_buffer_page(bpage);
1700                 }
1701         }
1702         return success;
1703 }
1704
1705 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1706 {
1707         int success;
1708
1709         if (cpu_buffer->nr_pages_to_update > 0)
1710                 success = rb_insert_pages(cpu_buffer);
1711         else
1712                 success = rb_remove_pages(cpu_buffer,
1713                                         -cpu_buffer->nr_pages_to_update);
1714
1715         if (success)
1716                 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1717 }
1718
1719 static void update_pages_handler(struct work_struct *work)
1720 {
1721         struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1722                         struct ring_buffer_per_cpu, update_pages_work);
1723         rb_update_pages(cpu_buffer);
1724         complete(&cpu_buffer->update_done);
1725 }
1726
1727 /**
1728  * ring_buffer_resize - resize the ring buffer
1729  * @buffer: the buffer to resize.
1730  * @size: the new size in bytes.
1731  * @cpu_id: the cpu buffer to resize
1732  *
1733  * Minimum size is 2 * BUF_PAGE_SIZE.
1734  *
1735  * Returns the new size on success and < 0 on failure.
1736  */
1737 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1738                         int cpu_id)
1739 {
1740         struct ring_buffer_per_cpu *cpu_buffer;
1741         unsigned long nr_pages;
1742         int cpu, err = 0;
1743
1744         /*
1745          * Always succeed at resizing a non-existent buffer:
1746          */
1747         if (!buffer)
1748                 return size;
1749
1750         /* Make sure the requested buffer exists */
1751         if (cpu_id != RING_BUFFER_ALL_CPUS &&
1752             !cpumask_test_cpu(cpu_id, buffer->cpumask))
1753                 return size;
1754
1755         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1756
1757         /* we need a minimum of two pages */
1758         if (nr_pages < 2)
1759                 nr_pages = 2;
1760
1761         size = nr_pages * BUF_PAGE_SIZE;
1762
1763         /*
1764          * Don't succeed if resizing is disabled, as a reader might be
1765          * manipulating the ring buffer and is expecting a sane state while
1766          * this is true.
1767          */
1768         if (atomic_read(&buffer->resize_disabled))
1769                 return -EBUSY;
1770
1771         /* prevent another thread from changing buffer sizes */
1772         mutex_lock(&buffer->mutex);
1773
1774         if (cpu_id == RING_BUFFER_ALL_CPUS) {
1775                 /* calculate the pages to update */
1776                 for_each_buffer_cpu(buffer, cpu) {
1777                         cpu_buffer = buffer->buffers[cpu];
1778
1779                         cpu_buffer->nr_pages_to_update = nr_pages -
1780                                                         cpu_buffer->nr_pages;
1781                         /*
1782                          * nothing more to do for removing pages or no update
1783                          */
1784                         if (cpu_buffer->nr_pages_to_update <= 0)
1785                                 continue;
1786                         /*
1787                          * to add pages, make sure all new pages can be
1788                          * allocated without receiving ENOMEM
1789                          */
1790                         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1791                         if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1792                                                 &cpu_buffer->new_pages, cpu)) {
1793                                 /* not enough memory for new pages */
1794                                 err = -ENOMEM;
1795                                 goto out_err;
1796                         }
1797                 }
1798
1799                 get_online_cpus();
1800                 /*
1801                  * Fire off all the required work handlers
1802                  * We can't schedule on offline CPUs, but it's not necessary
1803                  * since we can change their buffer sizes without any race.
1804                  */
1805                 for_each_buffer_cpu(buffer, cpu) {
1806                         cpu_buffer = buffer->buffers[cpu];
1807                         if (!cpu_buffer->nr_pages_to_update)
1808                                 continue;
1809
1810                         /* Can't run something on an offline CPU. */
1811                         if (!cpu_online(cpu)) {
1812                                 rb_update_pages(cpu_buffer);
1813                                 cpu_buffer->nr_pages_to_update = 0;
1814                         } else {
1815                                 schedule_work_on(cpu,
1816                                                 &cpu_buffer->update_pages_work);
1817                         }
1818                 }
1819
1820                 /* wait for all the updates to complete */
1821                 for_each_buffer_cpu(buffer, cpu) {
1822                         cpu_buffer = buffer->buffers[cpu];
1823                         if (!cpu_buffer->nr_pages_to_update)
1824                                 continue;
1825
1826                         if (cpu_online(cpu))
1827                                 wait_for_completion(&cpu_buffer->update_done);
1828                         cpu_buffer->nr_pages_to_update = 0;
1829                 }
1830
1831                 put_online_cpus();
1832         } else {
1833                 /* Make sure this CPU has been initialized */
1834                 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1835                         goto out;
1836
1837                 cpu_buffer = buffer->buffers[cpu_id];
1838
1839                 if (nr_pages == cpu_buffer->nr_pages)
1840                         goto out;
1841
1842                 cpu_buffer->nr_pages_to_update = nr_pages -
1843                                                 cpu_buffer->nr_pages;
1844
1845                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1846                 if (cpu_buffer->nr_pages_to_update > 0 &&
1847                         __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1848                                             &cpu_buffer->new_pages, cpu_id)) {
1849                         err = -ENOMEM;
1850                         goto out_err;
1851                 }
1852
1853                 get_online_cpus();
1854
1855                 /* Can't run something on an offline CPU. */
1856                 if (!cpu_online(cpu_id))
1857                         rb_update_pages(cpu_buffer);
1858                 else {
1859                         schedule_work_on(cpu_id,
1860                                          &cpu_buffer->update_pages_work);
1861                         wait_for_completion(&cpu_buffer->update_done);
1862                 }
1863
1864                 cpu_buffer->nr_pages_to_update = 0;
1865                 put_online_cpus();
1866         }
1867
1868  out:
1869         /*
1870          * The ring buffer resize can happen with the ring buffer
1871          * enabled, so that the update disturbs the tracing as little
1872          * as possible. But if the buffer is disabled, we do not need
1873          * to worry about that, and we can take the time to verify
1874          * that the buffer is not corrupt.
1875          */
1876         if (atomic_read(&buffer->record_disabled)) {
1877                 atomic_inc(&buffer->record_disabled);
1878                 /*
1879                  * Even though the buffer was disabled, we must make sure
1880                  * that it is truly disabled before calling rb_check_pages.
1881                  * There could have been a race between checking
1882                  * record_disabled and incrementing it.
1883                  */
1884                 synchronize_rcu();
1885                 for_each_buffer_cpu(buffer, cpu) {
1886                         cpu_buffer = buffer->buffers[cpu];
1887                         rb_check_pages(cpu_buffer);
1888                 }
1889                 atomic_dec(&buffer->record_disabled);
1890         }
1891
1892         mutex_unlock(&buffer->mutex);
1893         return size;
1894
1895  out_err:
1896         for_each_buffer_cpu(buffer, cpu) {
1897                 struct buffer_page *bpage, *tmp;
1898
1899                 cpu_buffer = buffer->buffers[cpu];
1900                 cpu_buffer->nr_pages_to_update = 0;
1901
1902                 if (list_empty(&cpu_buffer->new_pages))
1903                         continue;
1904
1905                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1906                                         list) {
1907                         list_del_init(&bpage->list);
1908                         free_buffer_page(bpage);
1909                 }
1910         }
1911         mutex_unlock(&buffer->mutex);
1912         return err;
1913 }
1914 EXPORT_SYMBOL_GPL(ring_buffer_resize);
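/*
 * Illustrative sketch, not part of the implementation: the size argument is
 * in bytes and gets rounded up to whole buffer pages. The my_buffer name is
 * hypothetical.
 *
 *	int ret;
 *
 *	// Grow every per-CPU buffer to roughly 2MB:
 *	ret = ring_buffer_resize(my_buffer, 2 << 20, RING_BUFFER_ALL_CPUS);
 *	if (ret < 0)
 *		return ret;
 *
 *	// Shrink only CPU 1's buffer:
 *	ret = ring_buffer_resize(my_buffer, 64 << 10, 1);
 */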
1915
1916 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1917 {
1918         mutex_lock(&buffer->mutex);
1919         if (val)
1920                 buffer->flags |= RB_FL_OVERWRITE;
1921         else
1922                 buffer->flags &= ~RB_FL_OVERWRITE;
1923         mutex_unlock(&buffer->mutex);
1924 }
1925 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
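/*
 * Illustrative sketch, not part of the implementation: overwrite mode can be
 * toggled at run time. With it cleared, the writer drops new events when the
 * buffer fills instead of overwriting the oldest ones. my_buffer is
 * hypothetical.
 *
 *	ring_buffer_change_overwrite(my_buffer, 1);	// overwrite oldest events
 *	ring_buffer_change_overwrite(my_buffer, 0);	// drop new events when full
 */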
1926
1927 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1928 {
1929         return bpage->page->data + index;
1930 }
1931
1932 static __always_inline struct ring_buffer_event *
1933 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1934 {
1935         return __rb_page_index(cpu_buffer->reader_page,
1936                                cpu_buffer->reader_page->read);
1937 }
1938
1939 static __always_inline struct ring_buffer_event *
1940 rb_iter_head_event(struct ring_buffer_iter *iter)
1941 {
1942         return __rb_page_index(iter->head_page, iter->head);
1943 }
1944
1945 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
1946 {
1947         return local_read(&bpage->page->commit);
1948 }
1949
1950 /* Size is determined by what has been committed */
1951 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
1952 {
1953         return rb_page_commit(bpage);
1954 }
1955
1956 static __always_inline unsigned
1957 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1958 {
1959         return rb_page_commit(cpu_buffer->commit_page);
1960 }
1961
1962 static __always_inline unsigned
1963 rb_event_index(struct ring_buffer_event *event)
1964 {
1965         unsigned long addr = (unsigned long)event;
1966
1967         return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1968 }
1969
1970 static void rb_inc_iter(struct ring_buffer_iter *iter)
1971 {
1972         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1973
1974         /*
1975          * The iterator could be on the reader page (it starts there).
1976          * But the head could have moved, since the reader was
1977          * found. Check for this case and assign the iterator
1978          * to the head page instead of next.
1979          */
1980         if (iter->head_page == cpu_buffer->reader_page)
1981                 iter->head_page = rb_set_head_page(cpu_buffer);
1982         else
1983                 rb_inc_page(cpu_buffer, &iter->head_page);
1984
1985         iter->read_stamp = iter->head_page->page->time_stamp;
1986         iter->head = 0;
1987 }
1988
1989 /*
1990  * rb_handle_head_page - writer hit the head page
1991  *
1992  * Returns: +1 to retry page
1993  *           0 to continue
1994  *          -1 on error
1995  */
1996 static int
1997 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1998                     struct buffer_page *tail_page,
1999                     struct buffer_page *next_page)
2000 {
2001         struct buffer_page *new_head;
2002         int entries;
2003         int type;
2004         int ret;
2005
2006         entries = rb_page_entries(next_page);
2007
2008         /*
2009          * The hard part is here. We need to move the head
2010          * forward, and protect against both readers on
2011          * other CPUs and writers coming in via interrupts.
2012          */
2013         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2014                                        RB_PAGE_HEAD);
2015
2016         /*
2017          * type can be one of four:
2018          *  NORMAL - an interrupt already moved it for us
2019          *  HEAD   - we are the first to get here.
2020          *  UPDATE - we are the interrupt interrupting
2021          *           a current move.
2022          *  MOVED  - a reader on another CPU moved the next
2023          *           pointer to its reader page. Give up
2024          *           and try again.
2025          */
2026
2027         switch (type) {
2028         case RB_PAGE_HEAD:
2029                 /*
2030                  * We changed the head to UPDATE, thus
2031                  * it is our responsibility to update
2032                  * the counters.
2033                  */
2034                 local_add(entries, &cpu_buffer->overrun);
2035                 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2036
2037                 /*
2038                  * The entries will be zeroed out when we move the
2039                  * tail page.
2040                  */
2041
2042                 /* still more to do */
2043                 break;
2044
2045         case RB_PAGE_UPDATE:
2046                 /*
2047                  * This is an interrupt that interrupted the
2048                  * previous update. Still more to do.
2049                  */
2050                 break;
2051         case RB_PAGE_NORMAL:
2052                 /*
2053                  * An interrupt came in before the update
2054                  * and processed this for us.
2055                  * Nothing left to do.
2056                  */
2057                 return 1;
2058         case RB_PAGE_MOVED:
2059                 /*
2060                  * The reader is on another CPU and just did
2061                  * a swap with our next_page.
2062                  * Try again.
2063                  */
2064                 return 1;
2065         default:
2066                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2067                 return -1;
2068         }
2069
2070         /*
2071          * Now that we are here, the old head pointer is
2072          * set to UPDATE. This will keep the reader from
2073          * swapping the head page with the reader page.
2074          * The reader (on another CPU) will spin till
2075          * we are finished.
2076          *
2077          * We just need to protect against interrupts
2078          * doing the job. We will set the next pointer
2079          * to HEAD. After that, we set the old pointer
2080          * to NORMAL, but only if it was HEAD before;
2081          * otherwise we are an interrupt, and only
2082          * want the outermost commit to reset it.
2083          */
2084         new_head = next_page;
2085         rb_inc_page(cpu_buffer, &new_head);
2086
2087         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2088                                     RB_PAGE_NORMAL);
2089
2090         /*
2091          * Valid returns are:
2092          *  HEAD   - an interrupt came in and already set it.
2093          *  NORMAL - One of two things:
2094          *            1) We really set it.
2095          *            2) A bunch of interrupts came in and moved
2096          *               the page forward again.
2097          */
2098         switch (ret) {
2099         case RB_PAGE_HEAD:
2100         case RB_PAGE_NORMAL:
2101                 /* OK */
2102                 break;
2103         default:
2104                 RB_WARN_ON(cpu_buffer, 1);
2105                 return -1;
2106         }
2107
2108         /*
2109          * It is possible that an interrupt came in,
2110          * set the head up, then more interrupts came in
2111          * and moved it again. When we get back here,
2112          * the page would have been set to NORMAL but we
2113          * just set it back to HEAD.
2114          *
2115          * How do you detect this? Well, if that happened
2116          * the tail page would have moved.
2117          */
2118         if (ret == RB_PAGE_NORMAL) {
2119                 struct buffer_page *buffer_tail_page;
2120
2121                 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2122                 /*
2123          * If the tail had moved past next, then we need
2124                  * to reset the pointer.
2125                  */
2126                 if (buffer_tail_page != tail_page &&
2127                     buffer_tail_page != next_page)
2128                         rb_head_page_set_normal(cpu_buffer, new_head,
2129                                                 next_page,
2130                                                 RB_PAGE_HEAD);
2131         }
2132
2133         /*
2134          * If this was the outermost commit (the one that
2135          * changed the original pointer from HEAD to UPDATE),
2136          * then it is up to us to reset it to NORMAL.
2137          */
2138         if (type == RB_PAGE_HEAD) {
2139                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2140                                               tail_page,
2141                                               RB_PAGE_UPDATE);
2142                 if (RB_WARN_ON(cpu_buffer,
2143                                ret != RB_PAGE_UPDATE))
2144                         return -1;
2145         }
2146
2147         return 0;
2148 }
2149
2150 static inline void
2151 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2152               unsigned long tail, struct rb_event_info *info)
2153 {
2154         struct buffer_page *tail_page = info->tail_page;
2155         struct ring_buffer_event *event;
2156         unsigned long length = info->length;
2157
2158         /*
2159          * Only the event that crossed the page boundary
2160          * must fill the old tail_page with padding.
2161          */
2162         if (tail >= BUF_PAGE_SIZE) {
2163                 /*
2164                  * If the page was filled, then we still need
2165                  * to update the real_end. Reset it to zero
2166                  * and the reader will ignore it.
2167                  */
2168                 if (tail == BUF_PAGE_SIZE)
2169                         tail_page->real_end = 0;
2170
2171                 local_sub(length, &tail_page->write);
2172                 return;
2173         }
2174
2175         event = __rb_page_index(tail_page, tail);
2176
2177         /* account for padding bytes */
2178         local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2179
2180         /*
2181          * Save the original length to the meta data.
2182          * This will be used by the reader to update the lost
2183          * event counter.
2184          */
2185         tail_page->real_end = tail;
2186
2187         /*
2188          * If this event is bigger than the minimum size, then
2189          * we need to be careful that we don't subtract the
2190          * write counter enough to allow another writer to slip
2191          * in on this page.
2192          * We put in a discarded commit instead, to make sure
2193          * that this space is not used again.
2194          *
2195          * If we are less than the minimum size, we don't need to
2196          * worry about it.
2197          */
2198         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2199                 /* No room for any events */
2200
2201                 /* Mark the rest of the page with padding */
2202                 rb_event_set_padding(event);
2203
2204                 /* Set the write back to the previous setting */
2205                 local_sub(length, &tail_page->write);
2206                 return;
2207         }
2208
2209         /* Put in a discarded event */
2210         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2211         event->type_len = RINGBUF_TYPE_PADDING;
2212         /* time delta must be non zero */
2213         event->time_delta = 1;
2214
2215         /* Set write to end of buffer */
2216         length = (tail + length) - BUF_PAGE_SIZE;
2217         local_sub(length, &tail_page->write);
2218 }
2219
2220 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2221
2222 /*
2223  * This is the slow path, force gcc not to inline it.
2224  */
2225 static noinline struct ring_buffer_event *
2226 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2227              unsigned long tail, struct rb_event_info *info)
2228 {
2229         struct buffer_page *tail_page = info->tail_page;
2230         struct buffer_page *commit_page = cpu_buffer->commit_page;
2231         struct ring_buffer *buffer = cpu_buffer->buffer;
2232         struct buffer_page *next_page;
2233         int ret;
2234
2235         next_page = tail_page;
2236
2237         rb_inc_page(cpu_buffer, &next_page);
2238
2239         /*
2240          * If for some reason, we had an interrupt storm that made
2241          * it all the way around the buffer, bail, and warn
2242          * about it.
2243          */
2244         if (unlikely(next_page == commit_page)) {
2245                 local_inc(&cpu_buffer->commit_overrun);
2246                 goto out_reset;
2247         }
2248
2249         /*
2250          * This is where the fun begins!
2251          *
2252          * We are fighting against races between a reader that
2253          * could be on another CPU trying to swap its reader
2254          * page with the buffer head.
2255          *
2256          * We are also fighting against interrupts coming in and
2257          * moving the head or tail on us as well.
2258          *
2259          * If the next page is the head page then we have filled
2260          * the buffer, unless the commit page is still on the
2261          * reader page.
2262          */
2263         if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2264
2265                 /*
2266                  * If the commit is not on the reader page, then
2267                  * move the head page.
2268                  */
2269                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2270                         /*
2271                          * If we are not in overwrite mode,
2272                          * this is easy, just stop here.
2273                          */
2274                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
2275                                 local_inc(&cpu_buffer->dropped_events);
2276                                 goto out_reset;
2277                         }
2278
2279                         ret = rb_handle_head_page(cpu_buffer,
2280                                                   tail_page,
2281                                                   next_page);
2282                         if (ret < 0)
2283                                 goto out_reset;
2284                         if (ret)
2285                                 goto out_again;
2286                 } else {
2287                         /*
2288                          * We need to be careful here too. The
2289                          * commit page could still be on the reader
2290                          * page. We could have a small buffer, and
2291                          * have filled up the buffer with events
2292                          * from interrupts and such, and wrapped.
2293                          *
2294                          * Note, if the tail page is also on the
2295                          * reader_page, we let it move out.
2296                          */
2297                         if (unlikely((cpu_buffer->commit_page !=
2298                                       cpu_buffer->tail_page) &&
2299                                      (cpu_buffer->commit_page ==
2300                                       cpu_buffer->reader_page))) {
2301                                 local_inc(&cpu_buffer->commit_overrun);
2302                                 goto out_reset;
2303                         }
2304                 }
2305         }
2306
2307         rb_tail_page_update(cpu_buffer, tail_page, next_page);
2308
2309  out_again:
2310
2311         rb_reset_tail(cpu_buffer, tail, info);
2312
2313         /* Commit what we have for now. */
2314         rb_end_commit(cpu_buffer);
2315         /* rb_end_commit() decs committing */
2316         local_inc(&cpu_buffer->committing);
2317
2318         /* fail and let the caller try again */
2319         return ERR_PTR(-EAGAIN);
2320
2321  out_reset:
2322         /* reset write */
2323         rb_reset_tail(cpu_buffer, tail, info);
2324
2325         return NULL;
2326 }
2327
2328 /* Slow path, do not inline */
2329 static noinline struct ring_buffer_event *
2330 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
2331 {
2332         if (abs)
2333                 event->type_len = RINGBUF_TYPE_TIME_STAMP;
2334         else
2335                 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2336
2337         /* Not the first event on the page, or not delta? */
2338         if (abs || rb_event_index(event)) {
2339                 event->time_delta = delta & TS_MASK;
2340                 event->array[0] = delta >> TS_SHIFT;
2341         } else {
2342                 /* nope, just zero it */
2343                 event->time_delta = 0;
2344                 event->array[0] = 0;
2345         }
2346
2347         return skip_time_extend(event);
2348 }
2349
2350 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2351                                      struct ring_buffer_event *event);
2352
2353 /**
2354  * rb_update_event - update event type and data
2355  * @cpu_buffer: the per-CPU buffer the event belongs to
2356  * @event: the event to update
2357  * @info: the event info holding the length, delta and timestamp
2358  *
2359  * Update the type and data fields of the event. The length
2360  * is the actual size that is written to the ring buffer,
2361  * and with this, we can determine what to place into the
2362  * data field.
2363  */
2364 static void
2365 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2366                 struct ring_buffer_event *event,
2367                 struct rb_event_info *info)
2368 {
2369         unsigned length = info->length;
2370         u64 delta = info->delta;
2371
2372         /* Only a commit updates the timestamp */
2373         if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2374                 delta = 0;
2375
2376         /*
2377          * If we need to add a timestamp, then we
2378          * add it to the start of the reserved space.
2379          */
2380         if (unlikely(info->add_timestamp)) {
2381                 bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
2382
2383                 event = rb_add_time_stamp(event, info->delta, abs);
2384                 length -= RB_LEN_TIME_EXTEND;
2385                 delta = 0;
2386         }
2387
2388         event->time_delta = delta;
2389         length -= RB_EVNT_HDR_SIZE;
2390         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2391                 event->type_len = 0;
2392                 event->array[0] = length;
2393         } else
2394                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2395 }
2396
2397 static unsigned rb_calculate_event_length(unsigned length)
2398 {
2399         struct ring_buffer_event event; /* Used only for sizeof array */
2400
2401         /* zero length can cause confusion */
2402         if (!length)
2403                 length++;
2404
2405         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2406                 length += sizeof(event.array[0]);
2407
2408         length += RB_EVNT_HDR_SIZE;
2409         length = ALIGN(length, RB_ARCH_ALIGNMENT);
2410
2411         /*
2412          * In case the time delta is larger than the 27 bits for it
2413          * in the header, we need to add a timestamp. If another
2414          * event comes in when trying to discard this one to increase
2415          * the length, then the timestamp will be added in the allocated
2416          * space of this event. If length is bigger than the size needed
2417          * for the TIME_EXTEND, then padding has to be used. The events
2418          * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2419          * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2420          * As length is a multiple of 4, we only need to worry if it
2421          * is 12 (RB_LEN_TIME_EXTEND + 4).
2422          */
2423         if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2424                 length += RB_ALIGNMENT;
2425
2426         return length;
2427 }
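/*
 * Worked example for the length math above, assuming RB_ARCH_ALIGNMENT == 4
 * and no forced 8-byte alignment: a request for 5 bytes of data keeps its
 * length (it is small enough to avoid the extra length word), gains the
 * 4 byte event header to become 9, and is aligned up to 12. Since
 * 12 == RB_LEN_TIME_EXTEND + RB_ALIGNMENT, it is bumped to 16 so that a
 * later discard can be replaced by a time extend plus a minimum sized
 * padding event.
 */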
2428
2429 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2430 static inline bool sched_clock_stable(void)
2431 {
2432         return true;
2433 }
2434 #endif
2435
2436 static inline int
2437 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2438                   struct ring_buffer_event *event)
2439 {
2440         unsigned long new_index, old_index;
2441         struct buffer_page *bpage;
2442         unsigned long index;
2443         unsigned long addr;
2444
2445         new_index = rb_event_index(event);
2446         old_index = new_index + rb_event_ts_length(event);
2447         addr = (unsigned long)event;
2448         addr &= PAGE_MASK;
2449
2450         bpage = READ_ONCE(cpu_buffer->tail_page);
2451
2452         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2453                 unsigned long write_mask =
2454                         local_read(&bpage->write) & ~RB_WRITE_MASK;
2455                 unsigned long event_length = rb_event_length(event);
2456                 /*
2457                  * This is on the tail page. It is possible that
2458                  * a write could come in and move the tail page
2459                  * and write to the next page. That is fine
2460                  * because we just shorten what is on this page.
2461                  */
2462                 old_index += write_mask;
2463                 new_index += write_mask;
2464                 index = local_cmpxchg(&bpage->write, old_index, new_index);
2465                 if (index == old_index) {
2466                         /* update counters */
2467                         local_sub(event_length, &cpu_buffer->entries_bytes);
2468                         return 1;
2469                 }
2470         }
2471
2472         /* could not discard */
2473         return 0;
2474 }
2475
2476 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2477 {
2478         local_inc(&cpu_buffer->committing);
2479         local_inc(&cpu_buffer->commits);
2480 }
2481
2482 static __always_inline void
2483 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2484 {
2485         unsigned long max_count;
2486
2487         /*
2488          * We only race with interrupts and NMIs on this CPU.
2489          * If we own the commit event, then we can commit
2490          * all others that interrupted us, since the interruptions
2491          * are in stack format (they finish before they come
2492          * back to us). This allows us to do a simple loop to
2493          * assign the commit to the tail.
2494          */
2495  again:
2496         max_count = cpu_buffer->nr_pages * 100;
2497
2498         while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2499                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2500                         return;
2501                 if (RB_WARN_ON(cpu_buffer,
2502                                rb_is_reader_page(cpu_buffer->tail_page)))
2503                         return;
2504                 local_set(&cpu_buffer->commit_page->page->commit,
2505                           rb_page_write(cpu_buffer->commit_page));
2506                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2507                 /* Only update the write stamp if the page has an event */
2508                 if (rb_page_write(cpu_buffer->commit_page))
2509                         cpu_buffer->write_stamp =
2510                                 cpu_buffer->commit_page->page->time_stamp;
2511                 /* add barrier to keep gcc from optimizing too much */
2512                 barrier();
2513         }
2514         while (rb_commit_index(cpu_buffer) !=
2515                rb_page_write(cpu_buffer->commit_page)) {
2516
2517                 local_set(&cpu_buffer->commit_page->page->commit,
2518                           rb_page_write(cpu_buffer->commit_page));
2519                 RB_WARN_ON(cpu_buffer,
2520                            local_read(&cpu_buffer->commit_page->page->commit) &
2521                            ~RB_WRITE_MASK);
2522                 barrier();
2523         }
2524
2525         /* again, keep gcc from optimizing */
2526         barrier();
2527
2528         /*
2529          * If an interrupt came in just after the first while loop
2530          * and pushed the tail page forward, we will be left with
2531          * a dangling commit that will never go forward.
2532          */
2533         if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2534                 goto again;
2535 }
2536
2537 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2538 {
2539         unsigned long commits;
2540
2541         if (RB_WARN_ON(cpu_buffer,
2542                        !local_read(&cpu_buffer->committing)))
2543                 return;
2544
2545  again:
2546         commits = local_read(&cpu_buffer->commits);
2547         /* synchronize with interrupts */
2548         barrier();
2549         if (local_read(&cpu_buffer->committing) == 1)
2550                 rb_set_commit_to_write(cpu_buffer);
2551
2552         local_dec(&cpu_buffer->committing);
2553
2554         /* synchronize with interrupts */
2555         barrier();
2556
2557         /*
2558          * Need to account for interrupts coming in between the
2559          * updating of the commit page and the clearing of the
2560          * committing counter.
2561          */
2562         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2563             !local_read(&cpu_buffer->committing)) {
2564                 local_inc(&cpu_buffer->committing);
2565                 goto again;
2566         }
2567 }
2568
2569 static inline void rb_event_discard(struct ring_buffer_event *event)
2570 {
2571         if (extended_time(event))
2572                 event = skip_time_extend(event);
2573
2574         /* array[0] holds the actual length for the discarded event */
2575         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2576         event->type_len = RINGBUF_TYPE_PADDING;
2577         /* time delta must be non zero */
2578         if (!event->time_delta)
2579                 event->time_delta = 1;
2580 }
2581
2582 static __always_inline bool
2583 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2584                    struct ring_buffer_event *event)
2585 {
2586         unsigned long addr = (unsigned long)event;
2587         unsigned long index;
2588
2589         index = rb_event_index(event);
2590         addr &= PAGE_MASK;
2591
2592         return cpu_buffer->commit_page->page == (void *)addr &&
2593                 rb_commit_index(cpu_buffer) == index;
2594 }
2595
2596 static __always_inline void
2597 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2598                       struct ring_buffer_event *event)
2599 {
2600         u64 delta;
2601
2602         /*
2603          * The first event in the commit queue updates the
2604          * time stamp.
2605          */
2606         if (rb_event_is_commit(cpu_buffer, event)) {
2607                 /*
2608                  * A commit event that is first on a page
2609                  * updates the write timestamp with the page stamp
2610                  */
2611                 if (!rb_event_index(event))
2612                         cpu_buffer->write_stamp =
2613                                 cpu_buffer->commit_page->page->time_stamp;
2614                 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2615                         delta = ring_buffer_event_time_stamp(event);
2616                         cpu_buffer->write_stamp += delta;
2617                 } else if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
2618                         delta = ring_buffer_event_time_stamp(event);
2619                         cpu_buffer->write_stamp = delta;
2620                 } else
2621                         cpu_buffer->write_stamp += event->time_delta;
2622         }
2623 }
2624
2625 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2626                       struct ring_buffer_event *event)
2627 {
2628         local_inc(&cpu_buffer->entries);
2629         rb_update_write_stamp(cpu_buffer, event);
2630         rb_end_commit(cpu_buffer);
2631 }
2632
2633 static __always_inline void
2634 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2635 {
2636         size_t nr_pages;
2637         size_t dirty;
2638         size_t full;
2639
2640         if (buffer->irq_work.waiters_pending) {
2641                 buffer->irq_work.waiters_pending = false;
2642                 /* irq_work_queue() supplies its own memory barriers */
2643                 irq_work_queue(&buffer->irq_work.work);
2644         }
2645
2646         if (cpu_buffer->irq_work.waiters_pending) {
2647                 cpu_buffer->irq_work.waiters_pending = false;
2648                 /* irq_work_queue() supplies its own memory barriers */
2649                 irq_work_queue(&cpu_buffer->irq_work.work);
2650         }
2651
2652         if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
2653                 return;
2654
2655         if (cpu_buffer->reader_page == cpu_buffer->commit_page)
2656                 return;
2657
2658         if (!cpu_buffer->irq_work.full_waiters_pending)
2659                 return;
2660
2661         cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
2662
2663         full = cpu_buffer->shortest_full;
2664         nr_pages = cpu_buffer->nr_pages;
2665         dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
2666         if (full && nr_pages && (dirty * 100) <= full * nr_pages)
2667                 return;
2668
2669         cpu_buffer->irq_work.wakeup_full = true;
2670         cpu_buffer->irq_work.full_waiters_pending = false;
2671         /* irq_work_queue() supplies its own memory barriers */
2672         irq_work_queue(&cpu_buffer->irq_work.work);
2673 }
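/*
 * Worked example for the shortest_full check above (illustrative figures):
 * with shortest_full == 50 (percent) and an 8 page buffer, the early return
 * is taken while dirty * 100 <= 50 * 8, so 4 dirty pages do not wake anyone;
 * only the 5th dirty page queues the irq_work for the full waiters.
 */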
2674
2675 /*
2676  * The lock and unlock are done within a preempt disable section.
2677  * The cpu_buffer->current_context variable can only be modified
2678  * by the current task between lock and unlock. But it can
2679  * be modified more than once via an interrupt. To pass this
2680  * information from the lock to the unlock without having to
2681  * access the 'in_interrupt()' functions again (which do show
2682  * a bit of overhead in something as critical as function tracing),
2683  * we use a bitmask trick.
2684  *
2685  *  bit 0 =  NMI context
2686  *  bit 1 =  IRQ context
2687  *  bit 2 =  SoftIRQ context
2688  *  bit 3 =  normal context.
2689  *
2690  * This works because this is the order of contexts that can
2691  * preempt other contexts. A SoftIRQ never preempts an IRQ
2692  * context.
2693  *
2694  * When the context is determined, the corresponding bit is
2695  * checked and set (if it was set, then a recursion of that context
2696  * happened).
2697  *
2698  * On unlock, we need to clear this bit. To do so, just subtract
2699  * 1 from the current_context and AND it to itself.
2700  *
2701  * (binary)
2702  *  101 - 1 = 100
2703  *  101 & 100 = 100 (clearing bit zero)
2704  *
2705  *  1010 - 1 = 1001
2706  *  1010 & 1001 = 1000 (clearing bit 1)
2707  *
2708  * The least significant bit can be cleared this way, and it
2709  * just so happens that it is the same bit corresponding to
2710  * the current context.
2711  */
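/*
 * Illustrative sketch of the bit trick above, not part of the implementation
 * (nest == 0 is assumed, so the unlock step is simply val &= val - 1):
 *
 *	unsigned int val = 0;
 *
 *	val |= 1 << RB_CTX_NORMAL;	// normal context locks:   1000
 *	val |= 1 << RB_CTX_IRQ;		// an IRQ interrupts it:   1010
 *	val &= val - 1;			// the IRQ unlocks:        1000
 *	val &= val - 1;			// normal context unlocks: 0000
 */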
2712
2713 static __always_inline int
2714 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2715 {
2716         unsigned int val = cpu_buffer->current_context;
2717         unsigned long pc = preempt_count();
2718         int bit;
2719
2720         if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
2721                 bit = RB_CTX_NORMAL;
2722         else
2723                 bit = pc & NMI_MASK ? RB_CTX_NMI :
2724                         pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
2725
2726         if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
2727                 return 1;
2728
2729         val |= (1 << (bit + cpu_buffer->nest));
2730         cpu_buffer->current_context = val;
2731
2732         return 0;
2733 }
2734
2735 static __always_inline void
2736 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2737 {
2738         cpu_buffer->current_context &=
2739                 cpu_buffer->current_context - (1 << cpu_buffer->nest);
2740 }
2741
2742 /* The recursive locking above uses 4 bits */
2743 #define NESTED_BITS 4
2744
2745 /**
2746  * ring_buffer_nest_start - Allow tracing while nested
2747  * @buffer: The ring buffer to modify
2748  *
2749  * The ring buffer has a safety mechanism to prevent recursion.
2750  * But there may be a case where a trace needs to be done while
2751  * tracing something else. In this case, calling this function
2752  * will allow tracing to nest within a currently active
2753  * ring_buffer_lock_reserve().
2754  *
2755  * Call this function before calling another ring_buffer_lock_reserve() and
2756  * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
2757  */
2758 void ring_buffer_nest_start(struct ring_buffer *buffer)
2759 {
2760         struct ring_buffer_per_cpu *cpu_buffer;
2761         int cpu;
2762
2763         /* Enabled by ring_buffer_nest_end() */
2764         preempt_disable_notrace();
2765         cpu = raw_smp_processor_id();
2766         cpu_buffer = buffer->buffers[cpu];
2767         /* This is the shift value for the above recursive locking */
2768         cpu_buffer->nest += NESTED_BITS;
2769 }
2770
2771 /**
2772  * ring_buffer_nest_end - End a nested tracing section
2773  * @buffer: The ring buffer to modify
2774  *
2775  * Must be called after ring_buffer_nest_start() and after the
2776  * ring_buffer_unlock_commit().
2777  */
2778 void ring_buffer_nest_end(struct ring_buffer *buffer)
2779 {
2780         struct ring_buffer_per_cpu *cpu_buffer;
2781         int cpu;
2782
2783         /* disabled by ring_buffer_nest_start() */
2784         cpu = raw_smp_processor_id();
2785         cpu_buffer = buffer->buffers[cpu];
2786         /* This is the shift value for the above recursive locking */
2787         cpu_buffer->nest -= NESTED_BITS;
2788         preempt_enable_notrace();
2789 }
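/*
 * Illustrative sketch, not part of the implementation: a nested write issued
 * while an outer ring_buffer_lock_reserve() is still active. The other_buffer
 * name and payload are hypothetical.
 *
 *	struct ring_buffer_event *nested;
 *	void *data;
 *
 *	ring_buffer_nest_start(other_buffer);
 *	nested = ring_buffer_lock_reserve(other_buffer, 16);
 *	if (nested) {
 *		data = ring_buffer_event_data(nested);
 *		memcpy(data, "nested event", 13);
 *		ring_buffer_unlock_commit(other_buffer, nested);
 *	}
 *	ring_buffer_nest_end(other_buffer);
 */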
2790
2791 /**
2792  * ring_buffer_unlock_commit - commit a reserved event
2793  * @buffer: The buffer to commit to
2794  * @event: The event pointer to commit.
2795  *
2796  * This commits the data to the ring buffer, and releases any locks held.
2797  *
2798  * Must be paired with ring_buffer_lock_reserve.
2799  */
2800 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2801                               struct ring_buffer_event *event)
2802 {
2803         struct ring_buffer_per_cpu *cpu_buffer;
2804         int cpu = raw_smp_processor_id();
2805
2806         cpu_buffer = buffer->buffers[cpu];
2807
2808         rb_commit(cpu_buffer, event);
2809
2810         rb_wakeups(buffer, cpu_buffer);
2811
2812         trace_recursive_unlock(cpu_buffer);
2813
2814         preempt_enable_notrace();
2815
2816         return 0;
2817 }
2818 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2819
2820 static noinline void
2821 rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2822                     struct rb_event_info *info)
2823 {
2824         WARN_ONCE(info->delta > (1ULL << 59),
2825                   KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2826                   (unsigned long long)info->delta,
2827                   (unsigned long long)info->ts,
2828                   (unsigned long long)cpu_buffer->write_stamp,
2829                   sched_clock_stable() ? "" :
2830                   "If you just came from a suspend/resume,\n"
2831                   "please switch to the trace global clock:\n"
2832                   "  echo global > /sys/kernel/debug/tracing/trace_clock\n"
2833                   "or add trace_clock=global to the kernel command line\n");
2834         info->add_timestamp = 1;
2835 }
2836
2837 static struct ring_buffer_event *
2838 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2839                   struct rb_event_info *info)
2840 {
2841         struct ring_buffer_event *event;
2842         struct buffer_page *tail_page;
2843         unsigned long tail, write;
2844
2845         /*
2846          * If the time delta since the last event is too big to
2847          * hold in the time field of the event, then we append a
2848          * TIME EXTEND event ahead of the data event.
2849          */
2850         if (unlikely(info->add_timestamp))
2851                 info->length += RB_LEN_TIME_EXTEND;
2852
2853         /* Don't let the compiler play games with cpu_buffer->tail_page */
2854         tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
2855         write = local_add_return(info->length, &tail_page->write);
2856
2857         /* set write to only the index of the write */
2858         write &= RB_WRITE_MASK;
2859         tail = write - info->length;
2860
2861         /*
2862          * If this is the first commit on the page, then it has the same
2863          * timestamp as the page itself.
2864          */
2865         if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer))
2866                 info->delta = 0;
2867
2868         /* See if we shot past the end of this buffer page */
2869         if (unlikely(write > BUF_PAGE_SIZE))
2870                 return rb_move_tail(cpu_buffer, tail, info);
2871
2872         /* We reserved something on the buffer */
2873
2874         event = __rb_page_index(tail_page, tail);
2875         rb_update_event(cpu_buffer, event, info);
2876
2877         local_inc(&tail_page->entries);
2878
2879         /*
2880          * If this is the first commit on the page, then update
2881          * its timestamp.
2882          */
2883         if (!tail)
2884                 tail_page->page->time_stamp = info->ts;
2885
2886         /* account for these added bytes */
2887         local_add(info->length, &cpu_buffer->entries_bytes);
2888
2889         return event;
2890 }
2891
2892 static __always_inline struct ring_buffer_event *
2893 rb_reserve_next_event(struct ring_buffer *buffer,
2894                       struct ring_buffer_per_cpu *cpu_buffer,
2895                       unsigned long length)
2896 {
2897         struct ring_buffer_event *event;
2898         struct rb_event_info info;
2899         int nr_loops = 0;
2900         u64 diff;
2901
2902         rb_start_commit(cpu_buffer);
2903
2904 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2905         /*
2906          * Due to the ability to swap a cpu buffer from a buffer
2907          * it is possible it was swapped before we committed.
2908          * (committing stops a swap). We check for it here and
2909          * if it happened, we have to fail the write.
2910          */
2911         barrier();
2912         if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
2913                 local_dec(&cpu_buffer->committing);
2914                 local_dec(&cpu_buffer->commits);
2915                 return NULL;
2916         }
2917 #endif
2918
2919         info.length = rb_calculate_event_length(length);
2920  again:
2921         info.add_timestamp = 0;
2922         info.delta = 0;
2923
2924         /*
2925          * We allow for interrupts to reenter here and do a trace.
2926          * If one does, it will cause this original code to loop
2927          * back here. Even with heavy interrupts happening, this
2928          * should only happen a few times in a row. If this happens
2929          * 1000 times in a row, there must be either an interrupt
2930          * storm or we have something buggy.
2931          * Bail!
2932          */
2933         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2934                 goto out_fail;
2935
2936         info.ts = rb_time_stamp(cpu_buffer->buffer);
2937         diff = info.ts - cpu_buffer->write_stamp;
2938
2939         /* make sure this diff is calculated here */
2940         barrier();
2941
2942         if (ring_buffer_time_stamp_abs(buffer)) {
2943                 info.delta = info.ts;
2944                 rb_handle_timestamp(cpu_buffer, &info);
2945         } else if (likely(info.ts >= cpu_buffer->write_stamp)) {
2946                 /* Did the write stamp get updated already? */
2947                 info.delta = diff;
2948                 if (unlikely(test_time_stamp(info.delta)))
2949                         rb_handle_timestamp(cpu_buffer, &info);
2950         }
2951
2952         event = __rb_reserve_next(cpu_buffer, &info);
2953
2954         if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2955                 if (info.add_timestamp)
2956                         info.length -= RB_LEN_TIME_EXTEND;
2957                 goto again;
2958         }
2959
2960         if (!event)
2961                 goto out_fail;
2962
2963         return event;
2964
2965  out_fail:
2966         rb_end_commit(cpu_buffer);
2967         return NULL;
2968 }
2969
2970 /**
2971  * ring_buffer_lock_reserve - reserve a part of the buffer
2972  * @buffer: the ring buffer to reserve from
2973  * @length: the length of the data to reserve (excluding event header)
2974  *
2975  * Returns a reserved event on the ring buffer to copy directly to.
2976  * The user of this interface will need to get the body to write into
2977  * and can use the ring_buffer_event_data() interface.
2978  *
2979  * The length is the length of the data needed, not the event length
2980  * which also includes the event header.
2981  *
2982  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2983  * If NULL is returned, then nothing has been allocated or locked.
2984  */
2985 struct ring_buffer_event *
2986 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2987 {
2988         struct ring_buffer_per_cpu *cpu_buffer;
2989         struct ring_buffer_event *event;
2990         int cpu;
2991
2992         /* If we are tracing schedule, we don't want to recurse */
2993         preempt_disable_notrace();
2994
2995         if (unlikely(atomic_read(&buffer->record_disabled)))
2996                 goto out;
2997
2998         cpu = raw_smp_processor_id();
2999
3000         if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3001                 goto out;
3002
3003         cpu_buffer = buffer->buffers[cpu];
3004
3005         if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3006                 goto out;
3007
3008         if (unlikely(length > BUF_MAX_DATA_SIZE))
3009                 goto out;
3010
3011         if (unlikely(trace_recursive_lock(cpu_buffer)))
3012                 goto out;
3013
3014         event = rb_reserve_next_event(buffer, cpu_buffer, length);
3015         if (!event)
3016                 goto out_unlock;
3017
3018         return event;
3019
3020  out_unlock:
3021         trace_recursive_unlock(cpu_buffer);
3022  out:
3023         preempt_enable_notrace();
3024         return NULL;
3025 }
3026 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
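
/*
 * Usage sketch (illustrative only, not kernel-doc): the reserve/commit
 * pairing described above, assuming the caller already has a ring
 * buffer from ring_buffer_alloc() and a hypothetical payload type
 * 'struct my_entry'.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 */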
3027
3028 /*
3029  * Decrement the entries to the page that an event is on.
3030  * The event does not even need to exist, only the pointer
3031  * to the page it is on. This may only be called before the commit
3032  * takes place.
3033  */
3034 static inline void
3035 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3036                    struct ring_buffer_event *event)
3037 {
3038         unsigned long addr = (unsigned long)event;
3039         struct buffer_page *bpage = cpu_buffer->commit_page;
3040         struct buffer_page *start;
3041
3042         addr &= PAGE_MASK;
3043
3044         /* Do the likely case first */
3045         if (likely(bpage->page == (void *)addr)) {
3046                 local_dec(&bpage->entries);
3047                 return;
3048         }
3049
3050         /*
3051          * Because the commit page may be on the reader page we
3052          * start with the next page and check the end loop there.
3053          */
3054         rb_inc_page(cpu_buffer, &bpage);
3055         start = bpage;
3056         do {
3057                 if (bpage->page == (void *)addr) {
3058                         local_dec(&bpage->entries);
3059                         return;
3060                 }
3061                 rb_inc_page(cpu_buffer, &bpage);
3062         } while (bpage != start);
3063
3064         /* commit not part of this buffer?? */
3065         RB_WARN_ON(cpu_buffer, 1);
3066 }
3067
3068 /**
3069  * ring_buffer_commit_discard - discard an event that has not been committed
3070  * @buffer: the ring buffer
3071  * @event: non committed event to discard
3072  *
3073  * Sometimes an event that is in the ring buffer needs to be ignored.
3074  * This function lets the user discard an event in the ring buffer
3075  * and then that event will not be read later.
3076  *
3077  * This function only works if it is called before the item has been
3078  * committed. It will try to free the event from the ring buffer
3079  * if another event has not been added behind it.
3080  *
3081  * If another event has been added behind it, it will set the event
3082  * up as discarded, and perform the commit.
3083  *
3084  * If this function is called, do not call ring_buffer_unlock_commit on
3085  * the event.
3086  */
3087 void ring_buffer_discard_commit(struct ring_buffer *buffer,
3088                                 struct ring_buffer_event *event)
3089 {
3090         struct ring_buffer_per_cpu *cpu_buffer;
3091         int cpu;
3092
3093         /* The event is discarded regardless */
3094         rb_event_discard(event);
3095
3096         cpu = smp_processor_id();
3097         cpu_buffer = buffer->buffers[cpu];
3098
3099         /*
3100          * This must only be called if the event has not been
3101          * committed yet. Thus we can assume that preemption
3102          * is still disabled.
3103          */
3104         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3105
3106         rb_decrement_entry(cpu_buffer, event);
3107         if (rb_try_to_discard(cpu_buffer, event))
3108                 goto out;
3109
3110         /*
3111          * The commit is still visible by the reader, so we
3112          * must still update the timestamp.
3113          */
3114         rb_update_write_stamp(cpu_buffer, event);
3115  out:
3116         rb_end_commit(cpu_buffer);
3117
3118         trace_recursive_unlock(cpu_buffer);
3119
3120         preempt_enable_notrace();
3121
3122 }
3123 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
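
/*
 * Usage sketch (illustrative only): reserve an event and then decide
 * not to keep it.  'fill_entry()' and 'struct my_entry' are
 * hypothetical and only stand in for caller logic.
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(struct my_entry));
 *	if (!event)
 *		return -EBUSY;
 *	if (!fill_entry(ring_buffer_event_data(event)))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */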
3124
3125 /**
3126  * ring_buffer_write - write data to the buffer without reserving
3127  * @buffer: The ring buffer to write to.
3128  * @length: The length of the data being written (excluding the event header)
3129  * @data: The data to write to the buffer.
3130  *
3131  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3132  * one function. If you already have the data to write to the buffer, it
3133  * may be easier to simply call this function.
3134  *
3135  * Note, like ring_buffer_lock_reserve, the length is the length of the data
3136  * and not the length of the event which would hold the header.
3137  */
3138 int ring_buffer_write(struct ring_buffer *buffer,
3139                       unsigned long length,
3140                       void *data)
3141 {
3142         struct ring_buffer_per_cpu *cpu_buffer;
3143         struct ring_buffer_event *event;
3144         void *body;
3145         int ret = -EBUSY;
3146         int cpu;
3147
3148         preempt_disable_notrace();
3149
3150         if (atomic_read(&buffer->record_disabled))
3151                 goto out;
3152
3153         cpu = raw_smp_processor_id();
3154
3155         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3156                 goto out;
3157
3158         cpu_buffer = buffer->buffers[cpu];
3159
3160         if (atomic_read(&cpu_buffer->record_disabled))
3161                 goto out;
3162
3163         if (length > BUF_MAX_DATA_SIZE)
3164                 goto out;
3165
3166         if (unlikely(trace_recursive_lock(cpu_buffer)))
3167                 goto out;
3168
3169         event = rb_reserve_next_event(buffer, cpu_buffer, length);
3170         if (!event)
3171                 goto out_unlock;
3172
3173         body = rb_event_data(event);
3174
3175         memcpy(body, data, length);
3176
3177         rb_commit(cpu_buffer, event);
3178
3179         rb_wakeups(buffer, cpu_buffer);
3180
3181         ret = 0;
3182
3183  out_unlock:
3184         trace_recursive_unlock(cpu_buffer);
3185
3186  out:
3187         preempt_enable_notrace();
3188
3189         return ret;
3190 }
3191 EXPORT_SYMBOL_GPL(ring_buffer_write);
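
/*
 * Usage sketch (illustrative only): when the payload already exists in
 * memory, one call replaces the reserve/commit pair.  'struct my_sample'
 * is a hypothetical caller-owned type.
 *
 *	struct my_sample sample = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(sample), &sample))
 *		pr_debug("ring buffer write failed\n");
 */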
3192
3193 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3194 {
3195         struct buffer_page *reader = cpu_buffer->reader_page;
3196         struct buffer_page *head = rb_set_head_page(cpu_buffer);
3197         struct buffer_page *commit = cpu_buffer->commit_page;
3198
3199         /* In case of error, head will be NULL */
3200         if (unlikely(!head))
3201                 return true;
3202
3203         return reader->read == rb_page_commit(reader) &&
3204                 (commit == reader ||
3205                  (commit == head &&
3206                   head->read == rb_page_commit(commit)));
3207 }
3208
3209 /**
3210  * ring_buffer_record_disable - stop all writes into the buffer
3211  * @buffer: The ring buffer to stop writes to.
3212  *
3213  * This prevents all writes to the buffer. Any attempt to write
3214  * to the buffer after this will fail and return NULL.
3215  *
3216  * The caller should call synchronize_rcu() after this.
3217  */
3218 void ring_buffer_record_disable(struct ring_buffer *buffer)
3219 {
3220         atomic_inc(&buffer->record_disabled);
3221 }
3222 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3223
3224 /**
3225  * ring_buffer_record_enable - enable writes to the buffer
3226  * @buffer: The ring buffer to enable writes
3227  *
3228  * Note, multiple disables will need the same number of enables
3229  * to truly enable the writing (much like preempt_disable).
3230  */
3231 void ring_buffer_record_enable(struct ring_buffer *buffer)
3232 {
3233         atomic_dec(&buffer->record_disabled);
3234 }
3235 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3236
3237 /**
3238  * ring_buffer_record_off - stop all writes into the buffer
3239  * @buffer: The ring buffer to stop writes to.
3240  *
3241  * This prevents all writes to the buffer. Any attempt to write
3242  * to the buffer after this will fail and return NULL.
3243  *
3244  * This is different from ring_buffer_record_disable() as
3245  * it works like an on/off switch, whereas the disable() version
3246  * must be paired with an enable().
3247  */
3248 void ring_buffer_record_off(struct ring_buffer *buffer)
3249 {
3250         unsigned int rd;
3251         unsigned int new_rd;
3252
3253         do {
3254                 rd = atomic_read(&buffer->record_disabled);
3255                 new_rd = rd | RB_BUFFER_OFF;
3256         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3257 }
3258 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3259
3260 /**
3261  * ring_buffer_record_on - restart writes into the buffer
3262  * @buffer: The ring buffer to start writes to.
3263  *
3264  * This enables all writes to the buffer that was disabled by
3265  * ring_buffer_record_off().
3266  *
3267  * This is different from ring_buffer_record_enable() as
3268  * it works like an on/off switch, whereas the enable() version
3269  * must be paired with a disable().
3270  */
3271 void ring_buffer_record_on(struct ring_buffer *buffer)
3272 {
3273         unsigned int rd;
3274         unsigned int new_rd;
3275
3276         do {
3277                 rd = atomic_read(&buffer->record_disabled);
3278                 new_rd = rd & ~RB_BUFFER_OFF;
3279         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3280 }
3281 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
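
/*
 * Usage sketch (illustrative only) contrasting the two mechanisms: the
 * off()/on() pair acts as a single on/off switch, while the
 * disable()/enable() pair nests like a counter, so both can be mixed.
 *
 *	ring_buffer_record_off(buffer);		// hard off (RB_BUFFER_OFF set)
 *	ring_buffer_record_disable(buffer);	// nested disable
 *	ring_buffer_record_enable(buffer);	// still off: the flag remains set
 *	ring_buffer_record_on(buffer);		// writes are accepted again
 */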
3282
3283 /**
3284  * ring_buffer_record_is_on - return true if the ring buffer can write
3285  * @buffer: The ring buffer to see if write is enabled
3286  *
3287  * Returns true if the ring buffer is in a state that it accepts writes.
3288  */
3289 bool ring_buffer_record_is_on(struct ring_buffer *buffer)
3290 {
3291         return !atomic_read(&buffer->record_disabled);
3292 }
3293
3294 /**
3295  * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
3296  * @buffer: The ring buffer to see if write is set enabled
3297  *
3298  * Returns true if the ring buffer is set writable by ring_buffer_record_on().
3299  * Note that this does NOT mean it is in a writable state.
3300  *
3301  * It may return true when the ring buffer has been disabled by
3302  * ring_buffer_record_disable(), as that is a temporary disabling of
3303  * the ring buffer.
3304  */
3305 bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
3306 {
3307         return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
3308 }
3309
3310 /**
3311  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3312  * @buffer: The ring buffer to stop writes to.
3313  * @cpu: The CPU buffer to stop
3314  *
3315  * This prevents all writes to the buffer. Any attempt to write
3316  * to the buffer after this will fail and return NULL.
3317  *
3318  * The caller should call synchronize_rcu() after this.
3319  */
3320 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3321 {
3322         struct ring_buffer_per_cpu *cpu_buffer;
3323
3324         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3325                 return;
3326
3327         cpu_buffer = buffer->buffers[cpu];
3328         atomic_inc(&cpu_buffer->record_disabled);
3329 }
3330 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3331
3332 /**
3333  * ring_buffer_record_enable_cpu - enable writes to the buffer
3334  * @buffer: The ring buffer to enable writes
3335  * @cpu: The CPU to enable.
3336  *
3337  * Note, multiple disables will need the same number of enables
3338  * to truly enable the writing (much like preempt_disable).
3339  */
3340 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3341 {
3342         struct ring_buffer_per_cpu *cpu_buffer;
3343
3344         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3345                 return;
3346
3347         cpu_buffer = buffer->buffers[cpu];
3348         atomic_dec(&cpu_buffer->record_disabled);
3349 }
3350 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3351
3352 /*
3353  * The total number of entries in the ring buffer is the running counter
3354  * of entries entered into the ring buffer, minus the sum of
3355  * the entries read from the ring buffer and the number of
3356  * entries that were overwritten.
3357  */
3358 static inline unsigned long
3359 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3360 {
3361         return local_read(&cpu_buffer->entries) -
3362                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3363 }
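
/*
 * Worked example (hypothetical numbers): with 1000 events written,
 * 300 already consumed by the reader and 200 lost to overwrite,
 * rb_num_of_entries() reports 1000 - (200 + 300) = 500 events still
 * sitting in the buffer.
 */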
3364
3365 /**
3366  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3367  * @buffer: The ring buffer
3368  * @cpu: The per CPU buffer to read from.
3369  */
3370 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3371 {
3372         unsigned long flags;
3373         struct ring_buffer_per_cpu *cpu_buffer;
3374         struct buffer_page *bpage;
3375         u64 ret = 0;
3376
3377         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3378                 return 0;
3379
3380         cpu_buffer = buffer->buffers[cpu];
3381         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3382         /*
3383          * If the tail is on the reader_page, the oldest time stamp is on
3384          * the reader page.
3385          */
3386         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3387                 bpage = cpu_buffer->reader_page;
3388         else
3389                 bpage = rb_set_head_page(cpu_buffer);
3390         if (bpage)
3391                 ret = bpage->page->time_stamp;
3392         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3393
3394         return ret;
3395 }
3396 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3397
3398 /**
3399  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3400  * @buffer: The ring buffer
3401  * @cpu: The per CPU buffer to read from.
3402  */
3403 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3404 {
3405         struct ring_buffer_per_cpu *cpu_buffer;
3406         unsigned long ret;
3407
3408         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3409                 return 0;
3410
3411         cpu_buffer = buffer->buffers[cpu];
3412         ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3413
3414         return ret;
3415 }
3416 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3417
3418 /**
3419  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3420  * @buffer: The ring buffer
3421  * @cpu: The per CPU buffer to get the entries from.
3422  */
3423 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3424 {
3425         struct ring_buffer_per_cpu *cpu_buffer;
3426
3427         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3428                 return 0;
3429
3430         cpu_buffer = buffer->buffers[cpu];
3431
3432         return rb_num_of_entries(cpu_buffer);
3433 }
3434 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3435
3436 /**
3437  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3438  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3439  * @buffer: The ring buffer
3440  * @cpu: The per CPU buffer to get the number of overruns from
3441  */
3442 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3443 {
3444         struct ring_buffer_per_cpu *cpu_buffer;
3445         unsigned long ret;
3446
3447         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3448                 return 0;
3449
3450         cpu_buffer = buffer->buffers[cpu];
3451         ret = local_read(&cpu_buffer->overrun);
3452
3453         return ret;
3454 }
3455 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3456
3457 /**
3458  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3459  * commits failing due to the buffer wrapping around while there are uncommitted
3460  * events, such as during an interrupt storm.
3461  * @buffer: The ring buffer
3462  * @cpu: The per CPU buffer to get the number of overruns from
3463  */
3464 unsigned long
3465 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3466 {
3467         struct ring_buffer_per_cpu *cpu_buffer;
3468         unsigned long ret;
3469
3470         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3471                 return 0;
3472
3473         cpu_buffer = buffer->buffers[cpu];
3474         ret = local_read(&cpu_buffer->commit_overrun);
3475
3476         return ret;
3477 }
3478 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3479
3480 /**
3481  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3482  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3483  * @buffer: The ring buffer
3484  * @cpu: The per CPU buffer to get the number of overruns from
3485  */
3486 unsigned long
3487 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3488 {
3489         struct ring_buffer_per_cpu *cpu_buffer;
3490         unsigned long ret;
3491
3492         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3493                 return 0;
3494
3495         cpu_buffer = buffer->buffers[cpu];
3496         ret = local_read(&cpu_buffer->dropped_events);
3497
3498         return ret;
3499 }
3500 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3501
3502 /**
3503  * ring_buffer_read_events_cpu - get the number of events successfully read
3504  * @buffer: The ring buffer
3505  * @cpu: The per CPU buffer to get the number of events read
3506  */
3507 unsigned long
3508 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3509 {
3510         struct ring_buffer_per_cpu *cpu_buffer;
3511
3512         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3513                 return 0;
3514
3515         cpu_buffer = buffer->buffers[cpu];
3516         return cpu_buffer->read;
3517 }
3518 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3519
3520 /**
3521  * ring_buffer_entries - get the number of entries in a buffer
3522  * @buffer: The ring buffer
3523  *
3524  * Returns the total number of entries in the ring buffer
3525  * (all CPU entries)
3526  */
3527 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3528 {
3529         struct ring_buffer_per_cpu *cpu_buffer;
3530         unsigned long entries = 0;
3531         int cpu;
3532
3533         /* if you care about this being correct, lock the buffer */
3534         for_each_buffer_cpu(buffer, cpu) {
3535                 cpu_buffer = buffer->buffers[cpu];
3536                 entries += rb_num_of_entries(cpu_buffer);
3537         }
3538
3539         return entries;
3540 }
3541 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3542
3543 /**
3544  * ring_buffer_overruns - get the number of overruns in buffer
3545  * @buffer: The ring buffer
3546  *
3547  * Returns the total number of overruns in the ring buffer
3548  * (all CPU entries)
3549  */
3550 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3551 {
3552         struct ring_buffer_per_cpu *cpu_buffer;
3553         unsigned long overruns = 0;
3554         int cpu;
3555
3556         /* if you care about this being correct, lock the buffer */
3557         for_each_buffer_cpu(buffer, cpu) {
3558                 cpu_buffer = buffer->buffers[cpu];
3559                 overruns += local_read(&cpu_buffer->overrun);
3560         }
3561
3562         return overruns;
3563 }
3564 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3565
3566 static void rb_iter_reset(struct ring_buffer_iter *iter)
3567 {
3568         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3569
3570         /* Iterator usage is expected to have recording disabled */
3571         iter->head_page = cpu_buffer->reader_page;
3572         iter->head = cpu_buffer->reader_page->read;
3573
3574         iter->cache_reader_page = iter->head_page;
3575         iter->cache_read = cpu_buffer->read;
3576
3577         if (iter->head)
3578                 iter->read_stamp = cpu_buffer->read_stamp;
3579         else
3580                 iter->read_stamp = iter->head_page->page->time_stamp;
3581 }
3582
3583 /**
3584  * ring_buffer_iter_reset - reset an iterator
3585  * @iter: The iterator to reset
3586  *
3587  * Resets the iterator, so that it will start from the beginning
3588  * again.
3589  */
3590 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3591 {
3592         struct ring_buffer_per_cpu *cpu_buffer;
3593         unsigned long flags;
3594
3595         if (!iter)
3596                 return;
3597
3598         cpu_buffer = iter->cpu_buffer;
3599
3600         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3601         rb_iter_reset(iter);
3602         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3603 }
3604 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3605
3606 /**
3607  * ring_buffer_iter_empty - check if an iterator has no more to read
3608  * @iter: The iterator to check
3609  */
3610 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3611 {
3612         struct ring_buffer_per_cpu *cpu_buffer;
3613         struct buffer_page *reader;
3614         struct buffer_page *head_page;
3615         struct buffer_page *commit_page;
3616         unsigned commit;
3617
3618         cpu_buffer = iter->cpu_buffer;
3619
3620         /* Remember, trace recording is off when iterator is in use */
3621         reader = cpu_buffer->reader_page;
3622         head_page = cpu_buffer->head_page;
3623         commit_page = cpu_buffer->commit_page;
3624         commit = rb_page_commit(commit_page);
3625
3626         return ((iter->head_page == commit_page && iter->head == commit) ||
3627                 (iter->head_page == reader && commit_page == head_page &&
3628                  head_page->read == commit &&
3629                  iter->head == rb_page_commit(cpu_buffer->reader_page)));
3630 }
3631 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3632
3633 static void
3634 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3635                      struct ring_buffer_event *event)
3636 {
3637         u64 delta;
3638
3639         switch (event->type_len) {
3640         case RINGBUF_TYPE_PADDING:
3641                 return;
3642
3643         case RINGBUF_TYPE_TIME_EXTEND:
3644                 delta = ring_buffer_event_time_stamp(event);
3645                 cpu_buffer->read_stamp += delta;
3646                 return;
3647
3648         case RINGBUF_TYPE_TIME_STAMP:
3649                 delta = ring_buffer_event_time_stamp(event);
3650                 cpu_buffer->read_stamp = delta;
3651                 return;
3652
3653         case RINGBUF_TYPE_DATA:
3654                 cpu_buffer->read_stamp += event->time_delta;
3655                 return;
3656
3657         default:
3658                 BUG();
3659         }
3660         return;
3661 }
3662
3663 static void
3664 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3665                           struct ring_buffer_event *event)
3666 {
3667         u64 delta;
3668
3669         switch (event->type_len) {
3670         case RINGBUF_TYPE_PADDING:
3671                 return;
3672
3673         case RINGBUF_TYPE_TIME_EXTEND:
3674                 delta = ring_buffer_event_time_stamp(event);
3675                 iter->read_stamp += delta;
3676                 return;
3677
3678         case RINGBUF_TYPE_TIME_STAMP:
3679                 delta = ring_buffer_event_time_stamp(event);
3680                 iter->read_stamp = delta;
3681                 return;
3682
3683         case RINGBUF_TYPE_DATA:
3684                 iter->read_stamp += event->time_delta;
3685                 return;
3686
3687         default:
3688                 BUG();
3689         }
3690         return;
3691 }
3692
3693 static struct buffer_page *
3694 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3695 {
3696         struct buffer_page *reader = NULL;
3697         unsigned long overwrite;
3698         unsigned long flags;
3699         int nr_loops = 0;
3700         int ret;
3701
3702         local_irq_save(flags);
3703         arch_spin_lock(&cpu_buffer->lock);
3704
3705  again:
3706         /*
3707          * This should normally only loop twice. But because the
3708          * start of the reader inserts an empty page, it causes
3709          * a case where we will loop three times. There should be no
3710          * reason to loop four times (that I know of).
3711          */
3712         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3713                 reader = NULL;
3714                 goto out;
3715         }
3716
3717         reader = cpu_buffer->reader_page;
3718
3719         /* If there's more to read, return this page */
3720         if (cpu_buffer->reader_page->read < rb_page_size(reader))
3721                 goto out;
3722
3723         /* Never should we have an index greater than the size */
3724         if (RB_WARN_ON(cpu_buffer,
3725                        cpu_buffer->reader_page->read > rb_page_size(reader)))
3726                 goto out;
3727
3728         /* check if we caught up to the tail */
3729         reader = NULL;
3730         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3731                 goto out;
3732
3733         /* Don't bother swapping if the ring buffer is empty */
3734         if (rb_num_of_entries(cpu_buffer) == 0)
3735                 goto out;
3736
3737         /*
3738          * Reset the reader page to size zero.
3739          */
3740         local_set(&cpu_buffer->reader_page->write, 0);
3741         local_set(&cpu_buffer->reader_page->entries, 0);
3742         local_set(&cpu_buffer->reader_page->page->commit, 0);
3743         cpu_buffer->reader_page->real_end = 0;
3744
3745  spin:
3746         /*
3747          * Splice the empty reader page into the list around the head.
3748          */
3749         reader = rb_set_head_page(cpu_buffer);
3750         if (!reader)
3751                 goto out;
3752         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3753         cpu_buffer->reader_page->list.prev = reader->list.prev;
3754
3755         /*
3756          * cpu_buffer->pages just needs to point to the buffer; it
3757          * has no specific buffer page to point to. Let's move it out
3758          * of our way so we don't accidentally swap it.
3759          */
3760         cpu_buffer->pages = reader->list.prev;
3761
3762         /* The reader page will be pointing to the new head */
3763         rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3764
3765         /*
3766          * We want to make sure we read the overruns after we set up our
3767          * pointers to the next object. The writer side does a
3768          * cmpxchg to cross pages which acts as the mb on the writer
3769          * side. Note, the reader will constantly fail the swap
3770          * while the writer is updating the pointers, so this
3771          * guarantees that the overwrite recorded here is the one we
3772          * want to compare with the last_overrun.
3773          */
3774         smp_mb();
3775         overwrite = local_read(&(cpu_buffer->overrun));
3776
3777         /*
3778          * Here's the tricky part.
3779          *
3780          * We need to move the pointer past the header page.
3781          * But we can only do that if a writer is not currently
3782          * moving it. The page before the header page has the
3783          * flag bit '1' set if it is pointing to the page we want,
3784          * but if the writer is in the process of moving it
3785          * then it will be '2' or already moved '0'.
3786          */
3787
3788         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3789
3790         /*
3791          * If we did not convert it, then we must try again.
3792          */
3793         if (!ret)
3794                 goto spin;
3795
3796         /*
3797          * Yay! We succeeded in replacing the page.
3798          *
3799          * Now make the new head point back to the reader page.
3800          */
3801         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3802         rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3803
3804         local_inc(&cpu_buffer->pages_read);
3805
3806         /* Finally update the reader page to the new head */
3807         cpu_buffer->reader_page = reader;
3808         cpu_buffer->reader_page->read = 0;
3809
3810         if (overwrite != cpu_buffer->last_overrun) {
3811                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3812                 cpu_buffer->last_overrun = overwrite;
3813         }
3814
3815         goto again;
3816
3817  out:
3818         /* Update the read_stamp on the first event */
3819         if (reader && reader->read == 0)
3820                 cpu_buffer->read_stamp = reader->page->time_stamp;
3821
3822         arch_spin_unlock(&cpu_buffer->lock);
3823         local_irq_restore(flags);
3824
3825         return reader;
3826 }
3827
3828 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3829 {
3830         struct ring_buffer_event *event;
3831         struct buffer_page *reader;
3832         unsigned length;
3833
3834         reader = rb_get_reader_page(cpu_buffer);
3835
3836         /* This function should not be called when buffer is empty */
3837         if (RB_WARN_ON(cpu_buffer, !reader))
3838                 return;
3839
3840         event = rb_reader_event(cpu_buffer);
3841
3842         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3843                 cpu_buffer->read++;
3844
3845         rb_update_read_stamp(cpu_buffer, event);
3846
3847         length = rb_event_length(event);
3848         cpu_buffer->reader_page->read += length;
3849 }
3850
3851 static void rb_advance_iter(struct ring_buffer_iter *iter)
3852 {
3853         struct ring_buffer_per_cpu *cpu_buffer;
3854         struct ring_buffer_event *event;
3855         unsigned length;
3856
3857         cpu_buffer = iter->cpu_buffer;
3858
3859         /*
3860          * Check if we are at the end of the buffer.
3861          */
3862         if (iter->head >= rb_page_size(iter->head_page)) {
3863                 /* discarded commits can make the page empty */
3864                 if (iter->head_page == cpu_buffer->commit_page)
3865                         return;
3866                 rb_inc_iter(iter);
3867                 return;
3868         }
3869
3870         event = rb_iter_head_event(iter);
3871
3872         length = rb_event_length(event);
3873
3874         /*
3875          * This should not be called to advance the header if we are
3876          * at the tail of the buffer.
3877          */
3878         if (RB_WARN_ON(cpu_buffer,
3879                        (iter->head_page == cpu_buffer->commit_page) &&
3880                        (iter->head + length > rb_commit_index(cpu_buffer))))
3881                 return;
3882
3883         rb_update_iter_read_stamp(iter, event);
3884
3885         iter->head += length;
3886
3887         /* check for end of page padding */
3888         if ((iter->head >= rb_page_size(iter->head_page)) &&
3889             (iter->head_page != cpu_buffer->commit_page))
3890                 rb_inc_iter(iter);
3891 }
3892
3893 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3894 {
3895         return cpu_buffer->lost_events;
3896 }
3897
3898 static struct ring_buffer_event *
3899 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3900                unsigned long *lost_events)
3901 {
3902         struct ring_buffer_event *event;
3903         struct buffer_page *reader;
3904         int nr_loops = 0;
3905
3906         if (ts)
3907                 *ts = 0;
3908  again:
3909         /*
3910          * We repeat when a time extend is encountered.
3911          * Since the time extend is always attached to a data event,
3912          * we should never loop more than once.
3913          * (We never hit the following condition more than twice).
3914          */
3915         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3916                 return NULL;
3917
3918         reader = rb_get_reader_page(cpu_buffer);
3919         if (!reader)
3920                 return NULL;
3921
3922         event = rb_reader_event(cpu_buffer);
3923
3924         switch (event->type_len) {
3925         case RINGBUF_TYPE_PADDING:
3926                 if (rb_null_event(event))
3927                         RB_WARN_ON(cpu_buffer, 1);
3928                 /*
3929                  * Because the writer could be discarding every
3930                  * event it creates (which would probably be bad)
3931                  * if we were to go back to "again" then we may never
3932                  * catch up, and will trigger the warn on, or lock
3933                  * the box. Return the padding, and we will release
3934                  * the current locks, and try again.
3935                  */
3936                 return event;
3937
3938         case RINGBUF_TYPE_TIME_EXTEND:
3939                 /* Internal data, OK to advance */
3940                 rb_advance_reader(cpu_buffer);
3941                 goto again;
3942
3943         case RINGBUF_TYPE_TIME_STAMP:
3944                 if (ts) {
3945                         *ts = ring_buffer_event_time_stamp(event);
3946                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3947                                                          cpu_buffer->cpu, ts);
3948                 }
3949                 /* Internal data, OK to advance */
3950                 rb_advance_reader(cpu_buffer);
3951                 goto again;
3952
3953         case RINGBUF_TYPE_DATA:
3954                 if (ts && !(*ts)) {
3955                         *ts = cpu_buffer->read_stamp + event->time_delta;
3956                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3957                                                          cpu_buffer->cpu, ts);
3958                 }
3959                 if (lost_events)
3960                         *lost_events = rb_lost_events(cpu_buffer);
3961                 return event;
3962
3963         default:
3964                 BUG();
3965         }
3966
3967         return NULL;
3968 }
3969 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3970
3971 static struct ring_buffer_event *
3972 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3973 {
3974         struct ring_buffer *buffer;
3975         struct ring_buffer_per_cpu *cpu_buffer;
3976         struct ring_buffer_event *event;
3977         int nr_loops = 0;
3978
3979         if (ts)
3980                 *ts = 0;
3981
3982         cpu_buffer = iter->cpu_buffer;
3983         buffer = cpu_buffer->buffer;
3984
3985         /*
3986          * Check if someone performed a consuming read to
3987          * the buffer. A consuming read invalidates the iterator
3988          * and we need to reset the iterator in this case.
3989          */
3990         if (unlikely(iter->cache_read != cpu_buffer->read ||
3991                      iter->cache_reader_page != cpu_buffer->reader_page))
3992                 rb_iter_reset(iter);
3993
3994  again:
3995         if (ring_buffer_iter_empty(iter))
3996                 return NULL;
3997
3998         /*
3999          * We repeat when a time extend is encountered or we hit
4000          * the end of the page. Since the time extend is always attached
4001          * to a data event, we should never loop more than three times.
4002          * Once for going to next page, once on time extend, and
4003          * finally once to get the event.
4004          * (We never hit the following condition more than thrice).
4005          */
4006         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
4007                 return NULL;
4008
4009         if (rb_per_cpu_empty(cpu_buffer))
4010                 return NULL;
4011
4012         if (iter->head >= rb_page_size(iter->head_page)) {
4013                 rb_inc_iter(iter);
4014                 goto again;
4015         }
4016
4017         event = rb_iter_head_event(iter);
4018
4019         switch (event->type_len) {
4020         case RINGBUF_TYPE_PADDING:
4021                 if (rb_null_event(event)) {
4022                         rb_inc_iter(iter);
4023                         goto again;
4024                 }
4025                 rb_advance_iter(iter);
4026                 return event;
4027
4028         case RINGBUF_TYPE_TIME_EXTEND:
4029                 /* Internal data, OK to advance */
4030                 rb_advance_iter(iter);
4031                 goto again;
4032
4033         case RINGBUF_TYPE_TIME_STAMP:
4034                 if (ts) {
4035                         *ts = ring_buffer_event_time_stamp(event);
4036                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4037                                                          cpu_buffer->cpu, ts);
4038                 }
4039                 /* Internal data, OK to advance */
4040                 rb_advance_iter(iter);
4041                 goto again;
4042
4043         case RINGBUF_TYPE_DATA:
4044                 if (ts && !(*ts)) {
4045                         *ts = iter->read_stamp + event->time_delta;
4046                         ring_buffer_normalize_time_stamp(buffer,
4047                                                          cpu_buffer->cpu, ts);
4048                 }
4049                 return event;
4050
4051         default:
4052                 BUG();
4053         }
4054
4055         return NULL;
4056 }
4057 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4058
4059 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4060 {
4061         if (likely(!in_nmi())) {
4062                 raw_spin_lock(&cpu_buffer->reader_lock);
4063                 return true;
4064         }
4065
4066         /*
4067          * If an NMI die dumps out the content of the ring buffer, a
4068          * trylock must be used to prevent a deadlock if the NMI
4069          * preempted a task that holds the ring buffer locks. If
4070          * we get the lock then all is fine; if not, then continue
4071          * to do the read, but this can corrupt the ring buffer,
4072          * so it must be permanently disabled from future writes.
4073          * Reading from NMI is a one-shot deal.
4074          */
4075         if (raw_spin_trylock(&cpu_buffer->reader_lock))
4076                 return true;
4077
4078         /* Continue without locking, but disable the ring buffer */
4079         atomic_inc(&cpu_buffer->record_disabled);
4080         return false;
4081 }
4082
4083 static inline void
4084 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4085 {
4086         if (likely(locked))
4087                 raw_spin_unlock(&cpu_buffer->reader_lock);
4088         return;
4089 }
4090
4091 /**
4092  * ring_buffer_peek - peek at the next event to be read
4093  * @buffer: The ring buffer to read
4094  * @cpu: The cpu to peek at
4095  * @ts: The timestamp counter of this event.
4096  * @lost_events: a variable to store if events were lost (may be NULL)
4097  *
4098  * This will return the event that will be read next, but does
4099  * not consume the data.
4100  */
4101 struct ring_buffer_event *
4102 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
4103                  unsigned long *lost_events)
4104 {
4105         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4106         struct ring_buffer_event *event;
4107         unsigned long flags;
4108         bool dolock;
4109
4110         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4111                 return NULL;
4112
4113  again:
4114         local_irq_save(flags);
4115         dolock = rb_reader_lock(cpu_buffer);
4116         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4117         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4118                 rb_advance_reader(cpu_buffer);
4119         rb_reader_unlock(cpu_buffer, dolock);
4120         local_irq_restore(flags);
4121
4122         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4123                 goto again;
4124
4125         return event;
4126 }
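
/*
 * Usage sketch (illustrative only): peek to find the CPU holding the
 * oldest event before consuming, without disturbing the buffers.
 * 'oldest_ts' and 'oldest_cpu' are hypothetical caller variables.
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event && ts < oldest_ts) {
 *		oldest_ts = ts;
 *		oldest_cpu = cpu;
 *	}
 */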
4127
4128 /**
4129  * ring_buffer_iter_peek - peek at the next event to be read
4130  * @iter: The ring buffer iterator
4131  * @ts: The timestamp counter of this event.
4132  *
4133  * This will return the event that will be read next, but does
4134  * not increment the iterator.
4135  */
4136 struct ring_buffer_event *
4137 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4138 {
4139         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4140         struct ring_buffer_event *event;
4141         unsigned long flags;
4142
4143  again:
4144         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4145         event = rb_iter_peek(iter, ts);
4146         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4147
4148         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4149                 goto again;
4150
4151         return event;
4152 }
4153
4154 /**
4155  * ring_buffer_consume - return an event and consume it
4156  * @buffer: The ring buffer to get the next event from
4157  * @cpu: the cpu to read the buffer from
4158  * @ts: a variable to store the timestamp (may be NULL)
4159  * @lost_events: a variable to store if events were lost (may be NULL)
4160  *
4161  * Returns the next event in the ring buffer, and that event is consumed.
4162  * Meaning that sequential reads will keep returning a different event,
4163  * and eventually empty the ring buffer if the producer is slower.
4164  */
4165 struct ring_buffer_event *
4166 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
4167                     unsigned long *lost_events)
4168 {
4169         struct ring_buffer_per_cpu *cpu_buffer;
4170         struct ring_buffer_event *event = NULL;
4171         unsigned long flags;
4172         bool dolock;
4173
4174  again:
4175         /* might be called in atomic */
4176         preempt_disable();
4177
4178         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4179                 goto out;
4180
4181         cpu_buffer = buffer->buffers[cpu];
4182         local_irq_save(flags);
4183         dolock = rb_reader_lock(cpu_buffer);
4184
4185         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4186         if (event) {
4187                 cpu_buffer->lost_events = 0;
4188                 rb_advance_reader(cpu_buffer);
4189         }
4190
4191         rb_reader_unlock(cpu_buffer, dolock);
4192         local_irq_restore(flags);
4193
4194  out:
4195         preempt_enable();
4196
4197         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4198                 goto again;
4199
4200         return event;
4201 }
4202 EXPORT_SYMBOL_GPL(ring_buffer_consume);
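
/*
 * Usage sketch (illustrative only): drain one CPU's buffer with
 * consuming reads.  'struct my_entry' is the hypothetical payload
 * from the reserve example above.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		struct my_entry *entry = ring_buffer_event_data(event);
 *
 *		pr_info("ts=%llu lost=%lu value=%d\n", ts, lost, entry->value);
 *	}
 */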
4203
4204 /**
4205  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4206  * @buffer: The ring buffer to read from
4207  * @cpu: The cpu buffer to iterate over
4208  *
4209  * This performs the initial preparations necessary to iterate
4210  * through the buffer.  Memory is allocated, buffer recording
4211  * is disabled, and the iterator pointer is returned to the caller.
4212  *
4213  * Disabling buffer recording prevents the reading from being
4214  * corrupted. This is not a consuming read, so a producer is not
4215  * expected.
4216  *
4217  * After a sequence of ring_buffer_read_prepare calls, the user is
4218  * expected to make at least one call to ring_buffer_read_prepare_sync.
4219  * Afterwards, ring_buffer_read_start is invoked to get things going
4220  * for real.
4221  *
4222  * This overall must be paired with ring_buffer_read_finish.
4223  */
4224 struct ring_buffer_iter *
4225 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
4226 {
4227         struct ring_buffer_per_cpu *cpu_buffer;
4228         struct ring_buffer_iter *iter;
4229
4230         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4231                 return NULL;
4232
4233         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
4234         if (!iter)
4235                 return NULL;
4236
4237         cpu_buffer = buffer->buffers[cpu];
4238
4239         iter->cpu_buffer = cpu_buffer;
4240
4241         atomic_inc(&buffer->resize_disabled);
4242         atomic_inc(&cpu_buffer->record_disabled);
4243
4244         return iter;
4245 }
4246 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
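
/*
 * Usage sketch (illustrative only) of the full non-consuming sequence
 * described above, for a single CPU.  'process()' is a hypothetical
 * consumer and error handling is elided.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process(event, ts);
 *	ring_buffer_read_finish(iter);
 */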
4247
4248 /**
4249  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4250  *
4251  * All previously invoked ring_buffer_read_prepare calls to prepare
4252  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4253  * calls on those iterators are allowed.
4254  */
4255 void
4256 ring_buffer_read_prepare_sync(void)
4257 {
4258         synchronize_rcu();
4259 }
4260 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4261
4262 /**
4263  * ring_buffer_read_start - start a non consuming read of the buffer
4264  * @iter: The iterator returned by ring_buffer_read_prepare
4265  *
4266  * This finalizes the startup of an iteration through the buffer.
4267  * The iterator comes from a call to ring_buffer_read_prepare and
4268  * an intervening ring_buffer_read_prepare_sync must have been
4269  * performed.
4270  *
4271  * Must be paired with ring_buffer_read_finish.
4272  */
4273 void
4274 ring_buffer_read_start(struct ring_buffer_iter *iter)
4275 {
4276         struct ring_buffer_per_cpu *cpu_buffer;
4277         unsigned long flags;
4278
4279         if (!iter)
4280                 return;
4281
4282         cpu_buffer = iter->cpu_buffer;
4283
4284         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4285         arch_spin_lock(&cpu_buffer->lock);
4286         rb_iter_reset(iter);
4287         arch_spin_unlock(&cpu_buffer->lock);
4288         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4289 }
4290 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4291
4292 /**
4293  * ring_buffer_read_finish - finish reading the iterator of the buffer
4294  * @iter: The iterator retrieved by ring_buffer_read_prepare
4295  *
4296  * This re-enables the recording to the buffer, and frees the
4297  * iterator.
4298  */
4299 void
4300 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4301 {
4302         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4303         unsigned long flags;
4304
4305         /*
4306          * Ring buffer is disabled from recording; here's a good place
4307          * to check the integrity of the ring buffer.
4308          * Must prevent readers from trying to read, as the check
4309          * clears the HEAD page and readers require it.
4310          */
4311         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4312         rb_check_pages(cpu_buffer);
4313         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4314
4315         atomic_dec(&cpu_buffer->record_disabled);
4316         atomic_dec(&cpu_buffer->buffer->resize_disabled);
4317         kfree(iter);
4318 }
4319 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4320
4321 /**
4322  * ring_buffer_read - read the next item in the ring buffer by the iterator
4323  * @iter: The ring buffer iterator
4324  * @ts: The time stamp of the event read.
4325  *
4326  * This reads the next event in the ring buffer and increments the iterator.
4327  */
4328 struct ring_buffer_event *
4329 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4330 {
4331         struct ring_buffer_event *event;
4332         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4333         unsigned long flags;
4334
4335         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4336  again:
4337         event = rb_iter_peek(iter, ts);
4338         if (!event)
4339                 goto out;
4340
4341         if (event->type_len == RINGBUF_TYPE_PADDING)
4342                 goto again;
4343
4344         rb_advance_iter(iter);
4345  out:
4346         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4347
4348         return event;
4349 }
4350 EXPORT_SYMBOL_GPL(ring_buffer_read);
4351
4352 /**
4353  * ring_buffer_size - return the size of the ring buffer (in bytes)
4354  * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
4355  */
4356 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4357 {
4358         /*
4359          * Earlier, this method returned
4360          *      BUF_PAGE_SIZE * buffer->nr_pages
4361          * Since the nr_pages field is now removed, we have converted this to
4362          * return the per cpu buffer value.
4363          */
4364         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4365                 return 0;
4366
4367         return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4368 }
4369 EXPORT_SYMBOL_GPL(ring_buffer_size);
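
/*
 * Usage sketch (illustrative only): since the value is per CPU, the
 * total footprint is the sum over the possible CPUs; CPUs outside the
 * buffer's cpumask simply contribute 0.
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		total += ring_buffer_size(buffer, cpu);
 */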
4370
4371 static void
4372 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4373 {
4374         rb_head_page_deactivate(cpu_buffer);
4375
4376         cpu_buffer->head_page
4377                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4378         local_set(&cpu_buffer->head_page->write, 0);
4379         local_set(&cpu_buffer->head_page->entries, 0);
4380         local_set(&cpu_buffer->head_page->page->commit, 0);
4381
4382         cpu_buffer->head_page->read = 0;
4383
4384         cpu_buffer->tail_page = cpu_buffer->head_page;
4385         cpu_buffer->commit_page = cpu_buffer->head_page;
4386
4387         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4388         INIT_LIST_HEAD(&cpu_buffer->new_pages);
4389         local_set(&cpu_buffer->reader_page->write, 0);
4390         local_set(&cpu_buffer->reader_page->entries, 0);
4391         local_set(&cpu_buffer->reader_page->page->commit, 0);
4392         cpu_buffer->reader_page->read = 0;
4393
4394         local_set(&cpu_buffer->entries_bytes, 0);
4395         local_set(&cpu_buffer->overrun, 0);
4396         local_set(&cpu_buffer->commit_overrun, 0);
4397         local_set(&cpu_buffer->dropped_events, 0);
4398         local_set(&cpu_buffer->entries, 0);
4399         local_set(&cpu_buffer->committing, 0);
4400         local_set(&cpu_buffer->commits, 0);
4401         local_set(&cpu_buffer->pages_touched, 0);
4402         local_set(&cpu_buffer->pages_read, 0);
4403         cpu_buffer->last_pages_touch = 0;
4404         cpu_buffer->shortest_full = 0;
4405         cpu_buffer->read = 0;
4406         cpu_buffer->read_bytes = 0;
4407
4408         cpu_buffer->write_stamp = 0;
4409         cpu_buffer->read_stamp = 0;
4410
4411         cpu_buffer->lost_events = 0;
4412         cpu_buffer->last_overrun = 0;
4413
4414         rb_head_page_activate(cpu_buffer);
4415 }
4416
4417 /**
4418  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4419  * @buffer: The ring buffer to reset a per cpu buffer of
4420  * @cpu: The CPU buffer to be reset
4421  */
4422 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4423 {
4424         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4425         unsigned long flags;
4426
4427         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4428                 return;
4429
4430         atomic_inc(&buffer->resize_disabled);
4431         atomic_inc(&cpu_buffer->record_disabled);
4432
4433         /* Make sure all commits have finished */
4434         synchronize_rcu();
4435
4436         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4437
4438         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4439                 goto out;
4440
4441         arch_spin_lock(&cpu_buffer->lock);
4442
4443         rb_reset_cpu(cpu_buffer);
4444
4445         arch_spin_unlock(&cpu_buffer->lock);
4446
4447  out:
4448         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4449
4450         atomic_dec(&cpu_buffer->record_disabled);
4451         atomic_dec(&buffer->resize_disabled);
4452 }
4453 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4454
4455 /**
4456  * ring_buffer_reset - reset a ring buffer
4457  * @buffer: The ring buffer to reset all cpu buffers
4458  */
4459 void ring_buffer_reset(struct ring_buffer *buffer)
4460 {
4461         int cpu;
4462
4463         for_each_buffer_cpu(buffer, cpu)
4464                 ring_buffer_reset_cpu(buffer, cpu);
4465 }
4466 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4467
4468 /**
4469  * ring_buffer_empty - is the ring buffer empty?
4470  * @buffer: The ring buffer to test
4471  */
4472 bool ring_buffer_empty(struct ring_buffer *buffer)
4473 {
4474         struct ring_buffer_per_cpu *cpu_buffer;
4475         unsigned long flags;
4476         bool dolock;
4477         int cpu;
4478         int ret;
4479
4480         /* yes this is racy, but if you don't like the race, lock the buffer */
4481         for_each_buffer_cpu(buffer, cpu) {
4482                 cpu_buffer = buffer->buffers[cpu];
4483                 local_irq_save(flags);
4484                 dolock = rb_reader_lock(cpu_buffer);
4485                 ret = rb_per_cpu_empty(cpu_buffer);
4486                 rb_reader_unlock(cpu_buffer, dolock);
4487                 local_irq_restore(flags);
4488
4489                 if (!ret)
4490                         return false;
4491         }
4492
4493         return true;
4494 }
4495 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4496
4497 /**
4498  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4499  * @buffer: The ring buffer
4500  * @cpu: The CPU buffer to test
4501  */
4502 bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4503 {
4504         struct ring_buffer_per_cpu *cpu_buffer;
4505         unsigned long flags;
4506         bool dolock;
4507         int ret;
4508
4509         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4510                 return true;
4511
4512         cpu_buffer = buffer->buffers[cpu];
4513         local_irq_save(flags);
4514         dolock = rb_reader_lock(cpu_buffer);
4515         ret = rb_per_cpu_empty(cpu_buffer);
4516         rb_reader_unlock(cpu_buffer, dolock);
4517         local_irq_restore(flags);
4518
4519         return ret;
4520 }
4521 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
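
/*
 * Illustrative sketch (not from the original source): the empty checks
 * above are typically used by a reader to decide whether to consume.
 * Because the check is racy, the consume call itself must still handle
 * an empty buffer. "handle_event" is a hypothetical callback.
 *
 *	static void drain_cpu(struct ring_buffer *buffer, int cpu)
 *	{
 *		struct ring_buffer_event *event;
 *
 *		while (!ring_buffer_empty_cpu(buffer, cpu)) {
 *			event = ring_buffer_consume(buffer, cpu, NULL, NULL);
 *			if (!event)
 *				break;
 *			handle_event(event);
 *		}
 *	}
 */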
4522
4523 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4524 /**
4525  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4526  * @buffer_a: One buffer to swap with
4527  * @buffer_b: The other buffer to swap with
 * @cpu: the CPU buffer to swap
4528  *
4529  * This function is useful for tracers that want to take a "snapshot"
4530  * of a CPU buffer and have another backup buffer lying around.
4531  * It is expected that the tracer handles the cpu buffer not being
4532  * used at the moment.
4533  */
4534 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4535                          struct ring_buffer *buffer_b, int cpu)
4536 {
4537         struct ring_buffer_per_cpu *cpu_buffer_a;
4538         struct ring_buffer_per_cpu *cpu_buffer_b;
4539         int ret = -EINVAL;
4540
4541         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4542             !cpumask_test_cpu(cpu, buffer_b->cpumask))
4543                 goto out;
4544
4545         cpu_buffer_a = buffer_a->buffers[cpu];
4546         cpu_buffer_b = buffer_b->buffers[cpu];
4547
4548         /* At least make sure the two buffers are somewhat the same */
4549         if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4550                 goto out;
4551
4552         ret = -EAGAIN;
4553
4554         if (atomic_read(&buffer_a->record_disabled))
4555                 goto out;
4556
4557         if (atomic_read(&buffer_b->record_disabled))
4558                 goto out;
4559
4560         if (atomic_read(&cpu_buffer_a->record_disabled))
4561                 goto out;
4562
4563         if (atomic_read(&cpu_buffer_b->record_disabled))
4564                 goto out;
4565
4566         /*
4567          * We can't do a synchronize_rcu here because this
4568          * function can be called in atomic context.
4569          * Normally this will be called from the same CPU as cpu.
4570          * If not it's up to the caller to protect this.
4571          */
4572         atomic_inc(&cpu_buffer_a->record_disabled);
4573         atomic_inc(&cpu_buffer_b->record_disabled);
4574
4575         ret = -EBUSY;
4576         if (local_read(&cpu_buffer_a->committing))
4577                 goto out_dec;
4578         if (local_read(&cpu_buffer_b->committing))
4579                 goto out_dec;
4580
4581         buffer_a->buffers[cpu] = cpu_buffer_b;
4582         buffer_b->buffers[cpu] = cpu_buffer_a;
4583
4584         cpu_buffer_b->buffer = buffer_a;
4585         cpu_buffer_a->buffer = buffer_b;
4586
4587         ret = 0;
4588
4589 out_dec:
4590         atomic_dec(&cpu_buffer_a->record_disabled);
4591         atomic_dec(&cpu_buffer_b->record_disabled);
4592 out:
4593         return ret;
4594 }
4595 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
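
/*
 * Illustrative sketch (not from the original source): a snapshot style
 * tracer keeps a spare buffer of the same size and swaps the live CPU
 * buffer into it, then reads the spare at leisure. "live" and "snap"
 * are hypothetical; -EAGAIN/-EBUSY mean recording was disabled or a
 * commit was in flight, so the caller retries or gives up.
 *
 *	for_each_online_cpu(cpu) {
 *		ret = ring_buffer_swap_cpu(live, snap, cpu);
 *		if (ret < 0)
 *			break;
 *	}
 */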
4596 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4597
4598 /**
4599  * ring_buffer_alloc_read_page - allocate a page to read from buffer
4600  * @buffer: the buffer to allocate for.
4601  * @cpu: the cpu buffer to allocate.
4602  *
4603  * This function is used in conjunction with ring_buffer_read_page.
4604  * When reading a full page from the ring buffer, these functions
4605  * can be used to speed up the process. The calling function should
4606  * allocate a few pages first with this function. Then when it
4607  * needs to get pages from the ring buffer, it passes the result
4608  * of this function into ring_buffer_read_page, which will swap
4609  * the page that was allocated with the read page of the buffer.
4610  *
4611  * Returns:
4612  *  The page allocated, or ERR_PTR
4613  */
4614 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4615 {
4616         struct ring_buffer_per_cpu *cpu_buffer;
4617         struct buffer_data_page *bpage = NULL;
4618         unsigned long flags;
4619         struct page *page;
4620
4621         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4622                 return ERR_PTR(-ENODEV);
4623
4624         cpu_buffer = buffer->buffers[cpu];
4625         local_irq_save(flags);
4626         arch_spin_lock(&cpu_buffer->lock);
4627
4628         if (cpu_buffer->free_page) {
4629                 bpage = cpu_buffer->free_page;
4630                 cpu_buffer->free_page = NULL;
4631         }
4632
4633         arch_spin_unlock(&cpu_buffer->lock);
4634         local_irq_restore(flags);
4635
4636         if (bpage)
4637                 goto out;
4638
4639         page = alloc_pages_node(cpu_to_node(cpu),
4640                                 GFP_KERNEL | __GFP_NORETRY, 0);
4641         if (!page)
4642                 return ERR_PTR(-ENOMEM);
4643
4644         bpage = page_address(page);
4645
4646  out:
4647         rb_init_page(bpage);
4648
4649         return bpage;
4650 }
4651 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
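
/*
 * Illustrative sketch (not from the original source): as described
 * above, a reader usually allocates its transfer pages up front, one
 * per CPU, and reuses them for every read. "read_pages" is a
 * hypothetical caller-owned array.
 *
 *	for_each_online_cpu(cpu) {
 *		read_pages[cpu] = ring_buffer_alloc_read_page(buffer, cpu);
 *		if (IS_ERR(read_pages[cpu]))
 *			return PTR_ERR(read_pages[cpu]);
 *	}
 */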
4652
4653 /**
4654  * ring_buffer_free_read_page - free an allocated read page
4655  * @buffer: the buffer the page was allocated for
4656  * @cpu: the cpu buffer the page came from
4657  * @data: the page to free
4658  *
4659  * Free a page allocated from ring_buffer_alloc_read_page.
4660  */
4661 void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
4662 {
4663         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4664         struct buffer_data_page *bpage = data;
4665         struct page *page = virt_to_page(bpage);
4666         unsigned long flags;
4667
4668         /* If the page is still in use someplace else, we can't reuse it */
4669         if (page_ref_count(page) > 1)
4670                 goto out;
4671
4672         local_irq_save(flags);
4673         arch_spin_lock(&cpu_buffer->lock);
4674
4675         if (!cpu_buffer->free_page) {
4676                 cpu_buffer->free_page = bpage;
4677                 bpage = NULL;
4678         }
4679
4680         arch_spin_unlock(&cpu_buffer->lock);
4681         local_irq_restore(flags);
4682
4683  out:
4684         free_page((unsigned long)bpage);
4685 }
4686 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4687
4688 /**
4689  * ring_buffer_read_page - extract a page from the ring buffer
4690  * @buffer: buffer to extract from
4691  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4692  * @len: amount to extract
4693  * @cpu: the cpu of the buffer to extract
4694  * @full: should the extraction only happen when the page is full.
4695  *
4696  * This function will pull out a page from the ring buffer and consume it.
4697  * @data_page must be the address of the variable that was returned
4698  * from ring_buffer_alloc_read_page. This is because the page might be used
4699  * to swap with a page in the ring buffer.
4700  *
4701  * for example:
4702  *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
4703  *      if (IS_ERR(rpage))
4704  *              return PTR_ERR(rpage);
4705  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4706  *      if (ret >= 0)
4707  *              process_page(rpage, ret);
4708  *
4709  * When @full is set, the function will not return the data unless
4710  * the writer is off the reader page.
4711  *
4712  * Note: it is up to the calling functions to handle sleeps and wakeups.
4713  *  The ring buffer can be used anywhere in the kernel and can not
4714  *  blindly call wake_up. The layer that uses the ring buffer must be
4715  *  responsible for that.
4716  *
4717  * Returns:
4718  *  >=0 if data has been transferred, returns the offset of consumed data.
4719  *  <0 if no data has been transferred.
4720  */
4721 int ring_buffer_read_page(struct ring_buffer *buffer,
4722                           void **data_page, size_t len, int cpu, int full)
4723 {
4724         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4725         struct ring_buffer_event *event;
4726         struct buffer_data_page *bpage;
4727         struct buffer_page *reader;
4728         unsigned long missed_events;
4729         unsigned long flags;
4730         unsigned int commit;
4731         unsigned int read;
4732         u64 save_timestamp;
4733         int ret = -1;
4734
4735         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4736                 goto out;
4737
4738         /*
4739          * If len is not big enough to hold the page header, then
4740          * we can not copy anything.
4741          */
4742         if (len <= BUF_PAGE_HDR_SIZE)
4743                 goto out;
4744
4745         len -= BUF_PAGE_HDR_SIZE;
4746
4747         if (!data_page)
4748                 goto out;
4749
4750         bpage = *data_page;
4751         if (!bpage)
4752                 goto out;
4753
4754         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4755
4756         reader = rb_get_reader_page(cpu_buffer);
4757         if (!reader)
4758                 goto out_unlock;
4759
4760         event = rb_reader_event(cpu_buffer);
4761
4762         read = reader->read;
4763         commit = rb_page_commit(reader);
4764
4765         /* Check if any events were dropped */
4766         missed_events = cpu_buffer->lost_events;
4767
4768         /*
4769          * If this page has been partially read or
4770          * if len is not big enough to read the rest of the page or
4771          * a writer is still on the page, then
4772          * we must copy the data from the page to the buffer.
4773          * Otherwise, we can simply swap the page with the one passed in.
4774          */
4775         if (read || (len < (commit - read)) ||
4776             cpu_buffer->reader_page == cpu_buffer->commit_page) {
4777                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4778                 unsigned int rpos = read;
4779                 unsigned int pos = 0;
4780                 unsigned int size;
4781
4782                 if (full)
4783                         goto out_unlock;
4784
4785                 if (len > (commit - read))
4786                         len = (commit - read);
4787
4788                 /* Always keep the time extend and data together */
4789                 size = rb_event_ts_length(event);
4790
4791                 if (len < size)
4792                         goto out_unlock;
4793
4794                 /* save the current timestamp, since the user will need it */
4795                 save_timestamp = cpu_buffer->read_stamp;
4796
4797                 /* Need to copy one event at a time */
4798                 do {
4799                         /* We need the size of one event, because
4800                          * rb_advance_reader only advances by one event,
4801                          * whereas rb_event_ts_length may include the size of
4802                          * one or two events.
4803                          * We have already ensured there's enough space if this
4804                          * is a time extend. */
4805                         size = rb_event_length(event);
4806                         memcpy(bpage->data + pos, rpage->data + rpos, size);
4807
4808                         len -= size;
4809
4810                         rb_advance_reader(cpu_buffer);
4811                         rpos = reader->read;
4812                         pos += size;
4813
4814                         if (rpos >= commit)
4815                                 break;
4816
4817                         event = rb_reader_event(cpu_buffer);
4818                         /* Always keep the time extend and data together */
4819                         size = rb_event_ts_length(event);
4820                 } while (len >= size);
4821
4822                 /* update bpage */
4823                 local_set(&bpage->commit, pos);
4824                 bpage->time_stamp = save_timestamp;
4825
4826                 /* we copied everything to the beginning */
4827                 read = 0;
4828         } else {
4829                 /* update the entry counter */
4830                 cpu_buffer->read += rb_page_entries(reader);
4831                 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4832
4833                 /* swap the pages */
4834                 rb_init_page(bpage);
4835                 bpage = reader->page;
4836                 reader->page = *data_page;
4837                 local_set(&reader->write, 0);
4838                 local_set(&reader->entries, 0);
4839                 reader->read = 0;
4840                 *data_page = bpage;
4841
4842                 /*
4843                  * Use the real_end for the data size,
4844                  * This gives us a chance to store the lost events
4845                  * on the page.
4846                  */
4847                 if (reader->real_end)
4848                         local_set(&bpage->commit, reader->real_end);
4849         }
4850         ret = read;
4851
4852         cpu_buffer->lost_events = 0;
4853
4854         commit = local_read(&bpage->commit);
4855         /*
4856          * Set a flag in the commit field if we lost events
4857          */
4858         if (missed_events) {
4859                 /* If there is room at the end of the page to save the
4860                  * missed events, then record it there.
4861                  */
4862                 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4863                         memcpy(&bpage->data[commit], &missed_events,
4864                                sizeof(missed_events));
4865                         local_add(RB_MISSED_STORED, &bpage->commit);
4866                         commit += sizeof(missed_events);
4867                 }
4868                 local_add(RB_MISSED_EVENTS, &bpage->commit);
4869         }
4870
4871         /*
4872          * This page may be off to user land. Zero it out here.
4873          */
4874         if (commit < BUF_PAGE_SIZE)
4875                 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4876
4877  out_unlock:
4878         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4879
4880  out:
4881         return ret;
4882 }
4883 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
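
/*
 * Illustrative sketch (not from the original source), tying the page
 * based read path together: allocate a transfer page, repeatedly let
 * ring_buffer_read_page() swap or copy data into it, then hand it
 * back. "process_page" is hypothetical; a negative return means there
 * was nothing (or not enough) to read this time.
 *
 *	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *	while ((ret = ring_buffer_read_page(buffer, &rpage,
 *					    PAGE_SIZE, cpu, 0)) >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, cpu, rpage);
 */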
4884
4885 /*
4886  * We only allocate new buffers, never free them if the CPU goes down.
4887  * If we were to free the buffer, then the user would lose any trace that was in
4888  * the buffer.
4889  */
4890 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
4891 {
4892         struct ring_buffer *buffer;
4893         long nr_pages_same;
4894         int cpu_i;
4895         unsigned long nr_pages;
4896
4897         buffer = container_of(node, struct ring_buffer, node);
4898         if (cpumask_test_cpu(cpu, buffer->cpumask))
4899                 return 0;
4900
4901         nr_pages = 0;
4902         nr_pages_same = 1;
4903         /* check if all cpu sizes are the same */
4904         for_each_buffer_cpu(buffer, cpu_i) {
4905                 /* fill in the size from first enabled cpu */
4906                 if (nr_pages == 0)
4907                         nr_pages = buffer->buffers[cpu_i]->nr_pages;
4908                 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4909                         nr_pages_same = 0;
4910                         break;
4911                 }
4912         }
4913         /* allocate minimum pages, user can later expand it */
4914         if (!nr_pages_same)
4915                 nr_pages = 2;
4916         buffer->buffers[cpu] =
4917                 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4918         if (!buffer->buffers[cpu]) {
4919                 WARN(1, "failed to allocate ring buffer on CPU %u\n",
4920                      cpu);
4921                 return -ENOMEM;
4922         }
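	/*
	 * Publish the newly allocated buffer before setting the cpumask
	 * bit: readers test the cpumask first and then dereference
	 * buffer->buffers[cpu], so keep these stores ordered.
	 */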
4923         smp_wmb();
4924         cpumask_set_cpu(cpu, buffer->cpumask);
4925         return 0;
4926 }
4927
4928 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4929 /*
4930  * This is a basic integrity check of the ring buffer.
4931  * Late in the boot cycle this test will run when configured in.
4932  * It will kick off a thread per CPU that will go into a loop
4933  * writing to the per cpu ring buffer various sizes of data.
4934  * Some of the data will be large items, some small.
4935  *
4936  * Another thread is created that goes into a spin, sending out
4937  * IPIs to the other CPUs to also write into the ring buffer.
4938  * This is to test the nesting ability of the buffer.
4939  *
4940  * Basic stats are recorded and reported. If something in the
4941  * ring buffer should happen that's not expected, a big warning
4942  * is displayed and all ring buffers are disabled.
4943  */
4944 static struct task_struct *rb_threads[NR_CPUS] __initdata;
4945
4946 struct rb_test_data {
4947         struct ring_buffer      *buffer;
4948         unsigned long           events;
4949         unsigned long           bytes_written;
4950         unsigned long           bytes_alloc;
4951         unsigned long           bytes_dropped;
4952         unsigned long           events_nested;
4953         unsigned long           bytes_written_nested;
4954         unsigned long           bytes_alloc_nested;
4955         unsigned long           bytes_dropped_nested;
4956         int                     min_size_nested;
4957         int                     max_size_nested;
4958         int                     max_size;
4959         int                     min_size;
4960         int                     cpu;
4961         int                     cnt;
4962 };
4963
4964 static struct rb_test_data rb_data[NR_CPUS] __initdata;
4965
4966 /* 1 meg per cpu */
4967 #define RB_TEST_BUFFER_SIZE     1048576
4968
4969 static char rb_string[] __initdata =
4970         "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4971         "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4972         "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4973
4974 static bool rb_test_started __initdata;
4975
4976 struct rb_item {
4977         int size;
4978         char str[];
4979 };
4980
4981 static __init int rb_write_something(struct rb_test_data *data, bool nested)
4982 {
4983         struct ring_buffer_event *event;
4984         struct rb_item *item;
4985         bool started;
4986         int event_len;
4987         int size;
4988         int len;
4989         int cnt;
4990
4991         /* Have nested writes different than what is written */
4992         cnt = data->cnt + (nested ? 27 : 0);
4993
4994         /* Multiply cnt by ~e (68/25), to make some unique increment */
4995         size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4996
4997         len = size + sizeof(struct rb_item);
4998
4999         started = rb_test_started;
5000         /* read rb_test_started before checking buffer enabled */
5001         smp_rmb();
5002
5003         event = ring_buffer_lock_reserve(data->buffer, len);
5004         if (!event) {
5005                 /* Ignore dropped events before test starts. */
5006                 if (started) {
5007                         if (nested)
5008                                 data->bytes_dropped_nested += len;
5009                         else
5010                                 data->bytes_dropped += len;
5011                 }
5012                 return len;
5013         }
5014
5015         event_len = ring_buffer_event_length(event);
5016
5017         if (RB_WARN_ON(data->buffer, event_len < len))
5018                 goto out;
5019
5020         item = ring_buffer_event_data(event);
5021         item->size = size;
5022         memcpy(item->str, rb_string, size);
5023
5024         if (nested) {
5025                 data->bytes_alloc_nested += event_len;
5026                 data->bytes_written_nested += len;
5027                 data->events_nested++;
5028                 if (!data->min_size_nested || len < data->min_size_nested)
5029                         data->min_size_nested = len;
5030                 if (len > data->max_size_nested)
5031                         data->max_size_nested = len;
5032         } else {
5033                 data->bytes_alloc += event_len;
5034                 data->bytes_written += len;
5035                 data->events++;
5036                 if (!data->min_size || len < data->min_size)
5037                         data->min_size = len;
5038                 if (len > data->max_size)
5039                         data->max_size = len;
5040         }
5041
5042  out:
5043         ring_buffer_unlock_commit(data->buffer, event);
5044
5045         return 0;
5046 }
5047
5048 static __init int rb_test(void *arg)
5049 {
5050         struct rb_test_data *data = arg;
5051
5052         while (!kthread_should_stop()) {
5053                 rb_write_something(data, false);
5054                 data->cnt++;
5055
5056                 set_current_state(TASK_INTERRUPTIBLE);
5057                 /* Now sleep between a min of 100-300us and a max of 1ms */
5058                 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
5059         }
5060
5061         return 0;
5062 }
5063
5064 static __init void rb_ipi(void *ignore)
5065 {
5066         struct rb_test_data *data;
5067         int cpu = smp_processor_id();
5068
5069         data = &rb_data[cpu];
5070         rb_write_something(data, true);
5071 }
5072
5073 static __init int rb_hammer_test(void *arg)
5074 {
5075         while (!kthread_should_stop()) {
5076
5077                 /* Send an IPI to all cpus to write data! */
5078                 smp_call_function(rb_ipi, NULL, 1);
5079                 /* No sleep, but for non-preempt kernels, let others run */
5080                 schedule();
5081         }
5082
5083         return 0;
5084 }
5085
5086 static __init int test_ringbuffer(void)
5087 {
5088         struct task_struct *rb_hammer;
5089         struct ring_buffer *buffer;
5090         int cpu;
5091         int ret = 0;
5092
5093         pr_info("Running ring buffer tests...\n");
5094
5095         buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
5096         if (WARN_ON(!buffer))
5097                 return 0;
5098
5099         /* Disable buffer so that threads can't write to it yet */
5100         ring_buffer_record_off(buffer);
5101
5102         for_each_online_cpu(cpu) {
5103                 rb_data[cpu].buffer = buffer;
5104                 rb_data[cpu].cpu = cpu;
5105                 rb_data[cpu].cnt = cpu;
5106                 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
5107                                                  "rbtester/%d", cpu);
5108                 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
5109                         pr_cont("FAILED\n");
5110                         ret = PTR_ERR(rb_threads[cpu]);
5111                         goto out_free;
5112                 }
5113
5114                 kthread_bind(rb_threads[cpu], cpu);
5115                 wake_up_process(rb_threads[cpu]);
5116         }
5117
5118         /* Now create the rb hammer! */
5119         rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
5120         if (WARN_ON(IS_ERR(rb_hammer))) {
5121                 pr_cont("FAILED\n");
5122                 ret = PTR_ERR(rb_hammer);
5123                 goto out_free;
5124         }
5125
5126         ring_buffer_record_on(buffer);
5127         /*
5128          * Show buffer is enabled before setting rb_test_started.
5129          * Yes there's a small race window where events could be
5130          * dropped and the thread won't catch it. But when a ring
5131          * buffer gets enabled, there will always be some kind of
5132          * delay before other CPUs see it. Thus, we don't care about
5133          * those dropped events. We care about events dropped after
5134          * the threads see that the buffer is active.
5135          */
5136         smp_wmb();
5137         rb_test_started = true;
5138
5139         set_current_state(TASK_INTERRUPTIBLE);
5140         /* Just run for 10 seconds */
5141         schedule_timeout(10 * HZ);
5142
5143         kthread_stop(rb_hammer);
5144
5145  out_free:
5146         for_each_online_cpu(cpu) {
5147                 if (!rb_threads[cpu])
5148                         break;
5149                 kthread_stop(rb_threads[cpu]);
5150         }
5151         if (ret) {
5152                 ring_buffer_free(buffer);
5153                 return ret;
5154         }
5155
5156         /* Report! */
5157         pr_info("finished\n");
5158         for_each_online_cpu(cpu) {
5159                 struct ring_buffer_event *event;
5160                 struct rb_test_data *data = &rb_data[cpu];
5161                 struct rb_item *item;
5162                 unsigned long total_events;
5163                 unsigned long total_dropped;
5164                 unsigned long total_written;
5165                 unsigned long total_alloc;
5166                 unsigned long total_read = 0;
5167                 unsigned long total_size = 0;
5168                 unsigned long total_len = 0;
5169                 unsigned long total_lost = 0;
5170                 unsigned long lost;
5171                 int big_event_size;
5172                 int small_event_size;
5173
5174                 ret = -1;
5175
5176                 total_events = data->events + data->events_nested;
5177                 total_written = data->bytes_written + data->bytes_written_nested;
5178                 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
5179                 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
5180
5181                 big_event_size = data->max_size + data->max_size_nested;
5182                 small_event_size = data->min_size + data->min_size_nested;
5183
5184                 pr_info("CPU %d:\n", cpu);
5185                 pr_info("              events:    %ld\n", total_events);
5186                 pr_info("       dropped bytes:    %ld\n", total_dropped);
5187                 pr_info("       alloced bytes:    %ld\n", total_alloc);
5188                 pr_info("       written bytes:    %ld\n", total_written);
5189                 pr_info("       biggest event:    %d\n", big_event_size);
5190                 pr_info("      smallest event:    %d\n", small_event_size);
5191
5192                 if (RB_WARN_ON(buffer, total_dropped))
5193                         break;
5194
5195                 ret = 0;
5196
5197                 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
5198                         total_lost += lost;
5199                         item = ring_buffer_event_data(event);
5200                         total_len += ring_buffer_event_length(event);
5201                         total_size += item->size + sizeof(struct rb_item);
5202                         if (memcmp(&item->str[0], rb_string, item->size) != 0) {
5203                                 pr_info("FAILED!\n");
5204                                 pr_info("buffer had: %.*s\n", item->size, item->str);
5205                                 pr_info("expected:   %.*s\n", item->size, rb_string);
5206                                 RB_WARN_ON(buffer, 1);
5207                                 ret = -1;
5208                                 break;
5209                         }
5210                         total_read++;
5211                 }
5212                 if (ret)
5213                         break;
5214
5215                 ret = -1;
5216
5217                 pr_info("         read events:   %ld\n", total_read);
5218                 pr_info("         lost events:   %ld\n", total_lost);
5219                 pr_info("        total events:   %ld\n", total_lost + total_read);
5220                 pr_info("  recorded len bytes:   %ld\n", total_len);
5221                 pr_info(" recorded size bytes:   %ld\n", total_size);
5222                 if (total_lost)
5223                         pr_info(" With dropped events, record len and size may not match\n"
5224                                 " alloced and written from above\n");
5225                 if (!total_lost) {
5226                         if (RB_WARN_ON(buffer, total_len != total_alloc ||
5227                                        total_size != total_written))
5228                                 break;
5229                 }
5230                 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
5231                         break;
5232
5233                 ret = 0;
5234         }
5235         if (!ret)
5236                 pr_info("Ring buffer PASSED!\n");
5237
5238         ring_buffer_free(buffer);
5239         return 0;
5240 }
5241
5242 late_initcall(test_ringbuffer);
5243 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */