// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>      /* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local64.h>
#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock's timestamp has any of the 5 MSBs set, they need to
 * be saved and reinserted on read.
 */
#define TS_MSB          (0xf8ULL << 56)
#define ABS_TS_MASK     (~TS_MSB)
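
/*
 * Worked example (for illustration only): a clock value such as
 * 0x0900000000000000 has a bit inside TS_MSB set. Storing it as an
 * absolute timestamp keeps only (ts & ABS_TS_MASK); the dropped
 * (ts & TS_MSB) bits must be added back on read, which is what
 * rb_fix_abs_ts() below does.
 */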

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
        trace_seq_puts(s, "# compressed entry header\n");
        trace_seq_puts(s, "\ttype_len    :    5 bits\n");
        trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
        trace_seq_puts(s, "\tarray       :   32 bits\n");
        trace_seq_putc(s, '\n');
        trace_seq_printf(s, "\tpadding     : type == %d\n",
                         RINGBUF_TYPE_PADDING);
        trace_seq_printf(s, "\ttime_extend : type == %d\n",
                         RINGBUF_TYPE_TIME_EXTEND);
        trace_seq_printf(s, "\ttime_stamp : type == %d\n",
                         RINGBUF_TYPE_TIME_STAMP);
        trace_seq_printf(s, "\tdata max type_len  == %d\n",
                         RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

        return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on.  A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * cmpxchg is used to make this page swap lockless (see the head page
 * logic further below).
 *
 */
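
/*
 * A rough sketch of the swap drawn above, as done in rb_get_reader_page()
 * (illustrative pseudo-code only; the real code must also handle the
 * HEAD flag and retry on races):
 *
 *      reader->list.next = head->list.next;
 *      reader->list.prev = head->list.prev;
 *      cmpxchg(&head->list.prev->next, head, reader);  // splice reader in
 *      // the old head page then becomes the new reader page
 */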

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF           (1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT            4U
#define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT       0
# define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT       1
# define RB_ARCH_ALIGNMENT              8U
#endif

#define RB_ALIGN_DATA           __aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
        RB_LEN_TIME_EXTEND = 8,
        RB_LEN_TIME_STAMP =  8,
};

#define skip_time_extend(event) \
        ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
        (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline bool rb_null_event(struct ring_buffer_event *event)
{
        return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
        /* padding has a NULL time_delta */
        event->type_len = RINGBUF_TYPE_PADDING;
        event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
        unsigned length;

        if (event->type_len)
                length = event->type_len * RB_ALIGNMENT;
        else
                length = event->array[0];
        return length + RB_EVNT_HDR_SIZE;
}
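
/*
 * Worked example (for illustration only): the event header is 32 bits
 * (5-bit type_len + 27-bit time_delta), so RB_EVNT_HDR_SIZE is 4. A
 * data event with type_len == 3 carries 3 * RB_ALIGNMENT = 12 bytes of
 * payload, 16 bytes total. With type_len == 0, the length is held in
 * array[0] instead (counting array[0] itself), adding 4 bytes of header.
 */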

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
        switch (event->type_len) {
        case RINGBUF_TYPE_PADDING:
                if (rb_null_event(event))
                        /* undefined */
                        return -1;
                return event->array[0] + RB_EVNT_HDR_SIZE;

        case RINGBUF_TYPE_TIME_EXTEND:
                return RB_LEN_TIME_EXTEND;

        case RINGBUF_TYPE_TIME_STAMP:
                return RB_LEN_TIME_STAMP;

        case RINGBUF_TYPE_DATA:
                return rb_event_data_length(event);
        default:
                WARN_ON_ONCE(1);
        }
        /* not hit */
        return 0;
}

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
        unsigned len = 0;

        if (extended_time(event)) {
                /* time extends include the data event after it */
                len = RB_LEN_TIME_EXTEND;
                event = skip_time_extend(event);
        }
        return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself, with the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
        unsigned length;

        if (extended_time(event))
                event = skip_time_extend(event);

        length = rb_event_length(event);
        if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
                return length;
        length -= RB_EVNT_HDR_SIZE;
        if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
                length -= sizeof(event->array[0]);
        return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
        if (extended_time(event))
                event = skip_time_extend(event);
        WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
        /* If length is in len field, then array[0] has the data */
        if (event->type_len)
                return (void *)&event->array[0];
        /* Otherwise length is in array[0] and array[1] has the data */
        return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
        return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)                \
        for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)         \
        for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT        27
#define TS_MASK         ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST   (~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
        u64 ts;

        ts = event->array[0];
        ts <<= TS_SHIFT;
        ts += event->time_delta;

        return ts;
}
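
/*
 * Worked example (for illustration only): a TIME_EXTEND event with
 * array[0] == 2 and time_delta == 5 expands to
 * (2ULL << TS_SHIFT) + 5 = 2 * 2^27 + 5 = 268435461.
 */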

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS        (1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED        (1 << 30)

struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
        local_t          commit;        /* write committed index */
        unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
};

struct buffer_data_read_page {
        unsigned                order;  /* order of the page */
        struct buffer_data_page *data;  /* actual data, stored in this page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
        struct list_head list;          /* list of buffer pages */
        local_t          write;         /* index for next write */
        unsigned         read;          /* index for next read */
        local_t          entries;       /* entries on this page */
        unsigned long    real_end;      /* real end of data */
        unsigned         order;         /* order of the page */
        struct buffer_data_page *page;  /* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are packed into one number. One is the
 * actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens,
 * the updater partition of the counter is incremented. This
 * allows the updater to update the counter atomically.
 *
 * The counter takes the low 20 bits, and the updater state data
 * takes the upper 12.
 */
#define RB_WRITE_MASK           0xfffff
#define RB_WRITE_INTCNT         (1 << 20)
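
/*
 * Worked example (for illustration only): a write field holding
 * 0x300005 decodes as three nested updaters in progress
 * (0x300005 >> 20 == 3) and a write index of 5
 * (0x300005 & RB_WRITE_MASK). See rb_tail_page_update() for how the
 * updater count is used to detect interrupts racing on the counters.
 */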

static void rb_init_page(struct buffer_data_page *bpage)
{
        local_set(&bpage->commit, 0);
}

static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
{
        return local_read(&bpage->page->commit);
}

static void free_buffer_page(struct buffer_page *bpage)
{
        free_pages((unsigned long)bpage->page, bpage->order);
        kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline bool test_time_stamp(u64 delta)
{
        return !!(delta & TS_DELTA_TEST);
}
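
/*
 * For illustration: deltas up to (1 << 27) - 1 = 134217727 fit in the
 * 27-bit time_delta field, so test_time_stamp(134217727) is false,
 * while test_time_stamp(1 << 27) is true and forces a time extend
 * (or an absolute time stamp) to be injected.
 */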

struct rb_irq_work {
        struct irq_work                 work;
        wait_queue_head_t               waiters;
        wait_queue_head_t               full_waiters;
        bool                            waiters_pending;
        bool                            full_waiters_pending;
        bool                            wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
        u64                     ts;
        u64                     delta;
        u64                     before;
        u64                     after;
        unsigned long           length;
        struct buffer_page      *tail_page;
        int                     add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
        RB_ADD_STAMP_NONE               = 0,
        RB_ADD_STAMP_EXTEND             = BIT(1),
        RB_ADD_STAMP_ABSOLUTE           = BIT(2),
        RB_ADD_STAMP_FORCE              = BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
        RB_CTX_TRANSITION,
        RB_CTX_NMI,
        RB_CTX_IRQ,
        RB_CTX_SOFTIRQ,
        RB_CTX_NORMAL,
        RB_CTX_MAX
};

struct rb_time_struct {
        local64_t       time;
};
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST        5

/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
        int                             cpu;
        atomic_t                        record_disabled;
        atomic_t                        resize_disabled;
        struct trace_buffer             *buffer;
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
        struct buffer_data_page         *free_page;
        unsigned long                   nr_pages;
        unsigned int                    current_context;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
        struct buffer_page              *commit_page;   /* committed pages */
        struct buffer_page              *reader_page;
        unsigned long                   lost_events;
        unsigned long                   last_overrun;
        unsigned long                   nest;
        local_t                         entries_bytes;
        local_t                         entries;
        local_t                         overrun;
        local_t                         commit_overrun;
        local_t                         dropped_events;
        local_t                         committing;
        local_t                         commits;
        local_t                         pages_touched;
        local_t                         pages_lost;
        local_t                         pages_read;
        long                            last_pages_touch;
        size_t                          shortest_full;
        unsigned long                   read;
        unsigned long                   read_bytes;
        rb_time_t                       write_stamp;
        rb_time_t                       before_stamp;
        u64                             event_stamp[MAX_NEST];
        u64                             read_stamp;
        /* pages removed since last reset */
        unsigned long                   pages_removed;
        /* ring buffer pages to update, > 0 to add, < 0 to remove */
        long                            nr_pages_to_update;
        struct list_head                new_pages; /* new pages to add */
        struct work_struct              update_pages_work;
        struct completion               update_done;

        struct rb_irq_work              irq_work;
};

struct trace_buffer {
        unsigned                        flags;
        int                             cpus;
        atomic_t                        record_disabled;
        atomic_t                        resizing;
        cpumask_var_t                   cpumask;

        struct lock_class_key           *reader_lock_key;

        struct mutex                    mutex;

        struct ring_buffer_per_cpu      **buffers;

        struct hlist_node               node;
        u64                             (*clock)(void);

        struct rb_irq_work              irq_work;
        bool                            time_stamp_abs;

        unsigned int                    subbuf_size;
        unsigned int                    subbuf_order;
        unsigned int                    max_data_size;
};

struct ring_buffer_iter {
        struct ring_buffer_per_cpu      *cpu_buffer;
        unsigned long                   head;
        unsigned long                   next_event;
        struct buffer_page              *head_page;
        struct buffer_page              *cache_reader_page;
        unsigned long                   cache_read;
        unsigned long                   cache_pages_removed;
        u64                             read_stamp;
        u64                             page_stamp;
        struct ring_buffer_event        *event;
        size_t                          event_size;
        int                             missed_events;
};

int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
{
        struct buffer_data_page field;

        trace_seq_printf(s, "\tfield: u64 timestamp;\t"
                         "offset:0;\tsize:%u;\tsigned:%u;\n",
                         (unsigned int)sizeof(field.time_stamp),
                         (unsigned int)is_signed_type(u64));

        trace_seq_printf(s, "\tfield: local_t commit;\t"
                         "offset:%u;\tsize:%u;\tsigned:%u;\n",
                         (unsigned int)offsetof(typeof(field), commit),
                         (unsigned int)sizeof(field.commit),
                         (unsigned int)is_signed_type(long));

        trace_seq_printf(s, "\tfield: int overwrite;\t"
                         "offset:%u;\tsize:%u;\tsigned:%u;\n",
                         (unsigned int)offsetof(typeof(field), commit),
                         1,
                         (unsigned int)is_signed_type(long));

        trace_seq_printf(s, "\tfield: char data;\t"
                         "offset:%u;\tsize:%u;\tsigned:%u;\n",
                         (unsigned int)offsetof(typeof(field), data),
                         (unsigned int)buffer->subbuf_size,
                         (unsigned int)is_signed_type(char));

        return !trace_seq_has_overflowed(s);
}

static inline void rb_time_read(rb_time_t *t, u64 *ret)
{
        *ret = local64_read(&t->time);
}
static void rb_time_set(rb_time_t *t, u64 val)
{
        local64_set(&t->time, val);
}

/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that was passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
                         void *event)
{
        struct buffer_page *page = cpu_buffer->commit_page;
        struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
        struct list_head *next;
        long commit, write;
        unsigned long addr = (unsigned long)event;
        bool done = false;
        int stop = 0;

        /* Make sure the event exists and is not committed yet */
        do {
                if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
                        done = true;
                commit = local_read(&page->page->commit);
                write = local_read(&page->write);
                if (addr >= (unsigned long)&page->page->data[commit] &&
                    addr < (unsigned long)&page->page->data[write])
                        return;

                next = rb_list_head(page->list.next);
                page = list_entry(next, struct buffer_page, list);
        } while (!done);
        WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
                         void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. rb_fix_abs_ts() takes a previous full time stamp,
 * and adds the 5 MSBs of that time stamp to the saved absolute
 * time stamp. The two are then compared, in case the unlikely
 * event occurred that the latest time stamp incremented the 5 MSBs.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
        if (save_ts & TS_MSB) {
                abs |= save_ts & TS_MSB;
                /* Check for overflow */
                if (unlikely(abs < save_ts))
                        abs += 1ULL << 59;
        }
        return abs;
}
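
/*
 * Worked example (for illustration only): if the last full time stamp
 * was save_ts = (1ULL << 59) | 100 and the saved 59-bit value read
 * back as abs = 50, then the low 59 bits must have wrapped since
 * save_ts was taken. Re-adding the MSBs gives (1ULL << 59) | 50,
 * which is still below save_ts, so the overflow check bumps the
 * result by 1ULL << 59 into the next 59-bit period.
 */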

static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. It must also be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, the write_stamp of the buffer is returned;
 * otherwise the current time is returned. Neither of the last two
 * cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
                                 struct ring_buffer_event *event)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
        unsigned int nest;
        u64 ts;

        /* If the event includes an absolute time, then just use that */
        if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
                ts = rb_event_time_stamp(event);
                return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
        }

        nest = local_read(&cpu_buffer->committing);
        verify_event(cpu_buffer, event);
        if (WARN_ON_ONCE(!nest))
                goto fail;

        /* Read the current saved nesting level time stamp */
        if (likely(--nest < MAX_NEST))
                return cpu_buffer->event_stamp[nest];

        /* Shouldn't happen, warn if it does */
        WARN_ONCE(1, "nest (%u) greater than max", nest);

 fail:
        rb_time_read(&cpu_buffer->write_stamp, &ts);

        return ts;
}

/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
        return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
        size_t read;
        size_t lost;
        size_t cnt;

        read = local_read(&buffer->buffers[cpu]->pages_read);
        lost = local_read(&buffer->buffers[cpu]->pages_lost);
        cnt = local_read(&buffer->buffers[cpu]->pages_touched);

        if (WARN_ON_ONCE(cnt < lost))
                return 0;

        cnt -= lost;

        /* The reader can read an empty page, but not more than that */
        if (cnt < read) {
                WARN_ON_ONCE(read > cnt + 1);
                return 0;
        }

        return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        size_t nr_pages;
        size_t dirty;

        nr_pages = cpu_buffer->nr_pages;
        if (!nr_pages || !full)
                return true;

        /*
         * Add one as dirty will never equal nr_pages, as the sub-buffer
         * that the writer is on is not counted as dirty.
         * This is needed if "buffer_percent" is set to 100.
         */
        dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;

        return (dirty * 100) >= (full * nr_pages);
}
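
/*
 * Worked example (for illustration only): with nr_pages == 10 and a
 * watermark of full == 50 (percent), a reader waiting on the buffer is
 * woken once 4 pages have content: dirty = 4 + 1 = 5, and
 * 5 * 100 >= 50 * 10 holds.
 */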

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * irq_work callback that wakes up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
        struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

        wake_up_all(&rbwork->waiters);
        if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
                /* Only cpu_buffer sets the above flags */
                struct ring_buffer_per_cpu *cpu_buffer =
                        container_of(rbwork, struct ring_buffer_per_cpu, irq_work);

                /* Called from interrupt context */
                raw_spin_lock(&cpu_buffer->reader_lock);
                rbwork->wakeup_full = false;
                rbwork->full_waiters_pending = false;

                /* Waking up all waiters, they will reset the shortest full */
                cpu_buffer->shortest_full = 0;
                raw_spin_unlock(&cpu_buffer->reader_lock);

                wake_up_all(&rbwork->full_waiters);
        }
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on
 *
 * When a file that represents a ring buffer is closing,
 * it is prudent to wake up any waiters that are on this.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct rb_irq_work *rbwork;

        if (!buffer)
                return;

        if (cpu == RING_BUFFER_ALL_CPUS) {

                /* Wake up individual ones too. One level recursion */
                for_each_buffer_cpu(buffer, cpu)
                        ring_buffer_wake_waiters(buffer, cpu);

                rbwork = &buffer->irq_work;
        } else {
                if (WARN_ON_ONCE(!buffer->buffers))
                        return;
                if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
                        return;

                cpu_buffer = buffer->buffers[cpu];
                /* The CPU buffer may not have been initialized yet */
                if (!cpu_buffer)
                        return;
                rbwork = &cpu_buffer->irq_work;
        }

        /* This can be called in any context */
        irq_work_queue(&rbwork->work);
}

static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        bool ret = false;

        /* A read of all CPUs always waits for any data */
        if (cpu == RING_BUFFER_ALL_CPUS)
                return !ring_buffer_empty(buffer);

        cpu_buffer = buffer->buffers[cpu];

        if (!ring_buffer_empty_cpu(buffer, cpu)) {
                unsigned long flags;
                bool pagebusy;

                if (!full)
                        return true;

                raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
                pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
                ret = !pagebusy && full_hit(buffer, cpu, full);

                if (!ret && (!cpu_buffer->shortest_full ||
                             cpu_buffer->shortest_full > full))
                        cpu_buffer->shortest_full = full;

                raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
        }
        return ret;
}

static inline bool
rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
             int cpu, int full, ring_buffer_cond_fn cond, void *data)
{
        if (rb_watermark_hit(buffer, cpu, full))
                return true;

        if (cond(data))
                return true;

        /*
         * The events can happen in critical sections where
         * checking a work queue can cause deadlocks.
         * After adding a task to the queue, this flag is set
         * only to notify events to try to wake up the queue
         * using irq_work.
         *
         * We don't clear it even if the buffer is no longer
         * empty. The flag only causes the next event to run
         * irq_work to do the work queue wake up. The worst
         * that can happen if we race with !trace_empty() is that
         * an event will cause an irq_work to try to wake up
         * an empty queue.
         *
         * There's no reason to protect this flag either, as
         * the work queue and irq_work logic will do the necessary
         * synchronization for the wake ups. The only thing
         * that is necessary is that the wake up happens after
         * a task has been queued. Spurious wake ups are OK.
         */
        if (full)
                rbwork->full_waiters_pending = true;
        else
                rbwork->waiters_pending = true;

        return false;
}

/*
 * The default wait condition for ring_buffer_wait() is to just exit the
 * wait loop the first time it is woken up.
 */
static bool rb_wait_once(void *data)
{
        long *once = data;

        /* wait_event() actually calls this twice before scheduling */
        if (*once > 1)
                return true;

        (*once)++;
        return false;
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 * @cond: condition function to break out of wait (NULL to run once)
 * @data: the data to pass to @cond.
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
                     ring_buffer_cond_fn cond, void *data)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct wait_queue_head *waitq;
        struct rb_irq_work *rbwork;
        long once = 0;
        int ret = 0;

        if (!cond) {
                cond = rb_wait_once;
                data = &once;
        }

        /*
         * Depending on what the caller is waiting for, either any
         * data in any cpu buffer, or a specific buffer, put the
         * caller on the appropriate wait queue.
         */
        if (cpu == RING_BUFFER_ALL_CPUS) {
                rbwork = &buffer->irq_work;
                /* Full only makes sense on per cpu reads */
                full = 0;
        } else {
                if (!cpumask_test_cpu(cpu, buffer->cpumask))
                        return -ENODEV;
                cpu_buffer = buffer->buffers[cpu];
                rbwork = &cpu_buffer->irq_work;
        }

        if (full)
                waitq = &rbwork->full_waiters;
        else
                waitq = &rbwork->waiters;

        ret = wait_event_interruptible((*waitq),
                                rb_wait_cond(rbwork, buffer, cpu, full, cond, data));

        return ret;
}

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table, int full)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct rb_irq_work *rbwork;

        if (cpu == RING_BUFFER_ALL_CPUS) {
                rbwork = &buffer->irq_work;
                full = 0;
        } else {
                if (!cpumask_test_cpu(cpu, buffer->cpumask))
                        return EPOLLERR;

                cpu_buffer = buffer->buffers[cpu];
                rbwork = &cpu_buffer->irq_work;
        }

        if (full) {
                poll_wait(filp, &rbwork->full_waiters, poll_table);

                if (rb_watermark_hit(buffer, cpu, full))
                        return EPOLLIN | EPOLLRDNORM;
                /*
                 * Only allow the full_waiters_pending update to be seen after
                 * the shortest_full is set (in rb_watermark_hit). If the
                 * writer sees the full_waiters_pending flag set, it will
                 * compare the amount in the ring buffer to shortest_full.
                 * If the amount in the ring buffer is greater than the
                 * shortest_full percent, it will call the irq_work handler
                 * to wake up this list. The irq_work handler will reset
                 * shortest_full back to zero. That's done under the
                 * reader_lock, but the below smp_mb() makes sure that the
                 * update to full_waiters_pending doesn't leak up into the
                 * above.
                 */
                smp_mb();
                rbwork->full_waiters_pending = true;
                return 0;
        }

        poll_wait(filp, &rbwork->waiters, poll_table);
        rbwork->waiters_pending = true;

        /*
         * There's a tight race between setting the waiters_pending and
         * checking if the ring buffer is empty.  Once the waiters_pending bit
         * is set, the next event will wake the task up, but we can get stuck
         * if there's only a single event in the buffer.
         *
         * FIXME: Ideally, we need a memory barrier on the writer side as well,
         * but adding a memory barrier to all events will cause too much of a
         * performance hit in the fast path.  We only need a memory barrier when
         * the buffer goes from empty to having content.  But as this race is
         * extremely small, and it's not a problem if another event comes in, we
         * will fix it later.
         */
        smp_mb();

        if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
            (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
                return EPOLLIN | EPOLLRDNORM;
        return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)                                             \
        ({                                                              \
                int _____ret = unlikely(cond);                          \
                if (_____ret) {                                         \
                        if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
                                struct ring_buffer_per_cpu *__b =       \
                                        (void *)b;                      \
                                atomic_inc(&__b->buffer->record_disabled); \
                        } else                                          \
                                atomic_inc(&b->record_disabled);        \
                        WARN_ON(1);                                     \
                }                                                       \
                _____ret;                                               \
        })

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct trace_buffer *buffer)
{
        u64 ts;

        /* Skip retpolines :-( */
        if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local))
                ts = trace_clock_local();
        else
                ts = buffer->clock();

        /* shift to debug/test normalization and TIME_EXTENTS */
        return ts << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
{
        u64 time;

        preempt_disable_notrace();
        time = rb_time_stamp(buffer);
        preempt_enable_notrace();

        return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
                                      int cpu, u64 *ts)
{
        /* Just stupid testing the normalize function and deltas */
        *ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they
 * only need to worry about interrupts. Reads, however, can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we cannot trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 *  the reader page with a page in the buffer, but before it
 *  could make the new header point back to the new page added
 *  it was preempted by a writer. The writer moved forward onto
 *  the new page added by the reader and is about to move forward
 *  again.
 *
 *  You can see that it is legitimate for the previous pointer of
 *  the head (or any page) not to point back to itself, but only
 *  temporarily.
 */

#define RB_PAGE_NORMAL          0UL
#define RB_PAGE_HEAD            1UL
#define RB_PAGE_UPDATE          2UL


#define RB_FLAG_MASK            3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED           4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
        unsigned long val = (unsigned long)list;

        return (struct list_head *)(val & ~RB_FLAG_MASK);
}
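
/*
 * For illustration: buffer pages are cache-line aligned, so a ->next
 * pointer of, say, 0xffff888003608001 encodes the page at
 * 0xffff888003608000 with RB_PAGE_HEAD set in bit 0. rb_list_head()
 * strips the two flag bits to recover the real pointer. The address
 * here is made up; only the flag bits matter.
 */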

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we cannot
 * trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be nonzero.
 */
static inline int
rb_is_head_page(struct buffer_page *page, struct list_head *list)
{
        unsigned long val;

        val = (unsigned long)list->next;

        if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
                return RB_PAGE_MOVED;

        return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
        struct list_head *list = page->list.prev;

        return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct list_head *list)
{
        unsigned long *ptr;

        ptr = (unsigned long *)&list->next;
        *ptr |= RB_PAGE_HEAD;
        *ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *head;

        head = cpu_buffer->head_page;
        if (!head)
                return;

        /*
         * Set the previous list pointer to have the HEAD flag.
         */
        rb_set_list_to_head(head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
        unsigned long *ptr = (unsigned long *)&list->next;

        *ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *hd;

        /* Go through the whole list and clear any pointers found. */
        rb_list_head_clear(cpu_buffer->pages);

        list_for_each(hd, cpu_buffer->pages)
                rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
                            struct buffer_page *head,
                            struct buffer_page *prev,
                            int old_flag, int new_flag)
{
        struct list_head *list;
        unsigned long val = (unsigned long)&head->list;
        unsigned long ret;

        list = &prev->list;

        val &= ~RB_FLAG_MASK;

        ret = cmpxchg((unsigned long *)&list->next,
                      val | old_flag, val | new_flag);

        /* check if the reader took the page */
        if ((ret & ~RB_FLAG_MASK) != val)
                return RB_PAGE_MOVED;

        return ret & RB_FLAG_MASK;
}
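
/*
 * For illustration: a writer that wants to take the head page first
 * does rb_head_page_set_update(), i.e. a cmpxchg of prev->next from
 * (&head->list | RB_PAGE_HEAD) to (&head->list | RB_PAGE_UPDATE). If
 * the cmpxchg instead finds a different page pointer there, a reader
 * swapped the head page out in the meantime and RB_PAGE_MOVED is
 * returned, telling the writer to re-find the head.
 */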

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
                                   struct buffer_page *head,
                                   struct buffer_page *prev,
                                   int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
                                 struct buffer_page *head,
                                 struct buffer_page *prev,
                                 int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
                                   struct buffer_page *head,
                                   struct buffer_page *prev,
                                   int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct buffer_page **bpage)
{
        struct list_head *p = rb_list_head((*bpage)->list.next);

        *bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *head;
        struct buffer_page *page;
        struct list_head *list;
        int i;

        if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
                return NULL;

        /* sanity check */
        list = cpu_buffer->pages;
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
                return NULL;

        page = head = cpu_buffer->head_page;
        /*
         * It is possible that the writer moves the header behind
         * where we started, and we miss it in one loop.
         * A second loop should grab the header, but we'll do
         * three loops just because I'm paranoid.
         */
        for (i = 0; i < 3; i++) {
                do {
                        if (rb_is_head_page(page, page->list.prev)) {
                                cpu_buffer->head_page = page;
                                return page;
                        }
                        rb_inc_page(&page);
                } while (page != head);
        }

        RB_WARN_ON(cpu_buffer, 1);

        return NULL;
}

static bool rb_head_page_replace(struct buffer_page *old,
                                struct buffer_page *new)
{
        unsigned long *ptr = (unsigned long *)&old->list.prev->next;
        unsigned long val;

        val = *ptr & ~RB_FLAG_MASK;
        val |= RB_PAGE_HEAD;

        return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page *tail_page,
                               struct buffer_page *next_page)
{
        unsigned long old_entries;
        unsigned long old_write;

        /*
         * The tail page now needs to be moved forward.
         *
         * We need to reset the tail page, but without messing
         * with possible erasing of data brought in by interrupts
         * that have moved the tail page and are currently on it.
         *
         * We add a counter to the write field to denote this.
         */
        old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
        old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

        local_inc(&cpu_buffer->pages_touched);
        /*
         * Just make sure we have seen our old_write and synchronize
         * with any interrupts that come in.
         */
        barrier();

        /*
         * If the tail page is still the same as what we think
         * it is, then it is up to us to update the tail
         * pointer.
         */
        if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
                /* Zero the write counter */
                unsigned long val = old_write & ~RB_WRITE_MASK;
                unsigned long eval = old_entries & ~RB_WRITE_MASK;

                /*
                 * This will only succeed if an interrupt did
                 * not come in and change it. If one did, we do
                 * not want to modify it.
                 *
                 * We add (void) to let the compiler know that we do not care
                 * about the return value of these functions. We use the
                 * cmpxchg to only update if an interrupt did not already
                 * do it for us. If the cmpxchg fails, we don't care.
                 */
                (void)local_cmpxchg(&next_page->write, old_write, val);
                (void)local_cmpxchg(&next_page->entries, old_entries, eval);

                /*
                 * No need to worry about races with clearing out the commit:
                 * it can only increment when a commit takes place, and that
                 * only happens in the outermost nested commit.
                 */
                local_set(&next_page->page->commit, 0);

                /* Again, either we update tail_page or an interrupt does */
                (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
        }
}
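
/*
 * Worked example (for illustration only): if next_page->write held
 * 0x100078 before the local_add_return() above, then old_write is
 * 0x200078 (two updaters, write index 0x78) and val is 0x200000, so
 * the cmpxchg resets the write index to zero while preserving the
 * updater count. If an interrupt changed the field in between, the
 * cmpxchg simply fails, which is fine: the interrupt did the reset.
 */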
1429
1430 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1431                           struct buffer_page *bpage)
1432 {
1433         unsigned long val = (unsigned long)bpage;
1434
1435         RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
1436 }
1437
1438 /**
1439  * rb_check_pages - integrity check of buffer pages
1440  * @cpu_buffer: CPU buffer with pages to test
1441  *
1442  * As a safety measure we check to make sure the data pages have not
1443  * been corrupted.
1444  */
1445 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1446 {
1447         struct list_head *head = rb_list_head(cpu_buffer->pages);
1448         struct list_head *tmp;
1449
1450         if (RB_WARN_ON(cpu_buffer,
1451                         rb_list_head(rb_list_head(head->next)->prev) != head))
1452                 return;
1453
1454         if (RB_WARN_ON(cpu_buffer,
1455                         rb_list_head(rb_list_head(head->prev)->next) != head))
1456                 return;
1457
1458         for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
1459                 if (RB_WARN_ON(cpu_buffer,
1460                                 rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
1461                         return;
1462
1463                 if (RB_WARN_ON(cpu_buffer,
1464                                 rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
1465                         return;
1466         }
1467 }
1468
1469 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1470                 long nr_pages, struct list_head *pages)
1471 {
1472         struct buffer_page *bpage, *tmp;
1473         bool user_thread = current->mm != NULL;
1474         gfp_t mflags;
1475         long i;
1476
1477         /*
1478          * Check that enough memory is available first.
1479          * Note, si_mem_available() only gives us a rough estimate of available
1480          * memory. It may not be accurate, but we don't care: we just want
1481          * to avoid doing any allocation when it is obvious that it is
1482          * not going to succeed.
1483          */
1484         i = si_mem_available();
1485         if (i < nr_pages)
1486                 return -ENOMEM;
1487
1488         /*
1489          * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1490          * gracefully without invoking the oom-killer, so the system is
1491          * not destabilized.
1492          */
1493         mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1494
1495         /*
1496          * A user thread may allocate too much while si_mem_available()
1497          * reports there's enough memory, even though there is not.
1498          * Make sure the OOM killer kills this thread. This can happen
1499          * even with RETRY_MAYFAIL because another task may be doing
1500          * an allocation after this task has taken all memory.
1501          * This is the task the OOM killer needs to take out during this
1502          * loop, even if it was triggered by an allocation somewhere else.
1503          */
1504         if (user_thread)
1505                 set_current_oom_origin();
1506         for (i = 0; i < nr_pages; i++) {
1507                 struct page *page;
1508
1509                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1510                                     mflags, cpu_to_node(cpu_buffer->cpu));
1511                 if (!bpage)
1512                         goto free_pages;
1513
1514                 rb_check_bpage(cpu_buffer, bpage);
1515
1516                 list_add(&bpage->list, pages);
1517
1518                 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags,
1519                                         cpu_buffer->buffer->subbuf_order);
1520                 if (!page)
1521                         goto free_pages;
1522                 bpage->page = page_address(page);
1523                 bpage->order = cpu_buffer->buffer->subbuf_order;
1524                 rb_init_page(bpage->page);
1525
1526                 if (user_thread && fatal_signal_pending(current))
1527                         goto free_pages;
1528         }
1529         if (user_thread)
1530                 clear_current_oom_origin();
1531
1532         return 0;
1533
1534 free_pages:
1535         list_for_each_entry_safe(bpage, tmp, pages, list) {
1536                 list_del_init(&bpage->list);
1537                 free_buffer_page(bpage);
1538         }
1539         if (user_thread)
1540                 clear_current_oom_origin();
1541
1542         return -ENOMEM;
1543 }
1544
1545 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1546                              unsigned long nr_pages)
1547 {
1548         LIST_HEAD(pages);
1549
1550         WARN_ON(!nr_pages);
1551
1552         if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
1553                 return -ENOMEM;
1554
1555         /*
1556          * The ring buffer page list is a circular list that does not
1557          * start and end with a list head. All page list items point to
1558          * other pages.
1559          */
1560         cpu_buffer->pages = pages.next;
1561         list_del(&pages);
1562
1563         cpu_buffer->nr_pages = nr_pages;
1564
1565         rb_check_pages(cpu_buffer);
1566
1567         return 0;
1568 }
1569
1570 static struct ring_buffer_per_cpu *
1571 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1572 {
1573         struct ring_buffer_per_cpu *cpu_buffer;
1574         struct buffer_page *bpage;
1575         struct page *page;
1576         int ret;
1577
1578         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1579                                   GFP_KERNEL, cpu_to_node(cpu));
1580         if (!cpu_buffer)
1581                 return NULL;
1582
1583         cpu_buffer->cpu = cpu;
1584         cpu_buffer->buffer = buffer;
1585         raw_spin_lock_init(&cpu_buffer->reader_lock);
1586         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1587         cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1588         INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1589         init_completion(&cpu_buffer->update_done);
1590         init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1591         init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1592         init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1593
1594         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1595                             GFP_KERNEL, cpu_to_node(cpu));
1596         if (!bpage)
1597                 goto fail_free_buffer;
1598
1599         rb_check_bpage(cpu_buffer, bpage);
1600
1601         cpu_buffer->reader_page = bpage;
1602
1603         page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, cpu_buffer->buffer->subbuf_order);
1604         if (!page)
1605                 goto fail_free_reader;
1606         bpage->page = page_address(page);
1607         rb_init_page(bpage->page);
1608
1609         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1610         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1611
1612         ret = rb_allocate_pages(cpu_buffer, nr_pages);
1613         if (ret < 0)
1614                 goto fail_free_reader;
1615
1616         cpu_buffer->head_page
1617                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1618         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1619
1620         rb_head_page_activate(cpu_buffer);
1621
1622         return cpu_buffer;
1623
1624  fail_free_reader:
1625         free_buffer_page(cpu_buffer->reader_page);
1626
1627  fail_free_buffer:
1628         kfree(cpu_buffer);
1629         return NULL;
1630 }
1631
1632 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1633 {
1634         struct list_head *head = cpu_buffer->pages;
1635         struct buffer_page *bpage, *tmp;
1636
1637         irq_work_sync(&cpu_buffer->irq_work.work);
1638
1639         free_buffer_page(cpu_buffer->reader_page);
1640
1641         if (head) {
1642                 rb_head_page_deactivate(cpu_buffer);
1643
1644                 list_for_each_entry_safe(bpage, tmp, head, list) {
1645                         list_del_init(&bpage->list);
1646                         free_buffer_page(bpage);
1647                 }
1648                 bpage = list_entry(head, struct buffer_page, list);
1649                 free_buffer_page(bpage);
1650         }
1651
1652         free_page((unsigned long)cpu_buffer->free_page);
1653
1654         kfree(cpu_buffer);
1655 }
1656
1657 /**
1658  * __ring_buffer_alloc - allocate a new ring_buffer
1659  * @size: the size in bytes per cpu that is needed.
1660  * @flags: attributes to set for the ring buffer.
1661  * @key: ring buffer reader_lock_key.
1662  *
1663  * Currently the only flag that is available is the RB_FL_OVERWRITE
1664  * flag. This flag means that the buffer will overwrite old data
1665  * when the buffer wraps. If this flag is not set, the buffer will
1666  * drop data when the tail hits the head.
1667  */
1668 struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1669                                         struct lock_class_key *key)
1670 {
1671         struct trace_buffer *buffer;
1672         long nr_pages;
1673         int bsize;
1674         int cpu;
1675         int ret;
1676
1677         /* keep it in its own cache line */
1678         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1679                          GFP_KERNEL);
1680         if (!buffer)
1681                 return NULL;
1682
1683         if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1684                 goto fail_free_buffer;
1685
1686         /* Default buffer page size - one system page */
1687         buffer->subbuf_order = 0;
1688         buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
1689
1690         /* Max payload is buffer page size - header (8 bytes) */
1691         buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
1692
1693         nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
1694         buffer->flags = flags;
1695         buffer->clock = trace_clock_local;
1696         buffer->reader_lock_key = key;
1697
1698         init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1699         init_waitqueue_head(&buffer->irq_work.waiters);
1700
1701         /* need at least two pages */
1702         if (nr_pages < 2)
1703                 nr_pages = 2;
1704
1705         buffer->cpus = nr_cpu_ids;
1706
1707         bsize = sizeof(void *) * nr_cpu_ids;
1708         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1709                                   GFP_KERNEL);
1710         if (!buffer->buffers)
1711                 goto fail_free_cpumask;
1712
1713         cpu = raw_smp_processor_id();
1714         cpumask_set_cpu(cpu, buffer->cpumask);
1715         buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1716         if (!buffer->buffers[cpu])
1717                 goto fail_free_buffers;
1718
1719         ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1720         if (ret < 0)
1721                 goto fail_free_buffers;
1722
1723         mutex_init(&buffer->mutex);
1724
1725         return buffer;
1726
1727  fail_free_buffers:
1728         for_each_buffer_cpu(buffer, cpu) {
1729                 if (buffer->buffers[cpu])
1730                         rb_free_cpu_buffer(buffer->buffers[cpu]);
1731         }
1732         kfree(buffer->buffers);
1733
1734  fail_free_cpumask:
1735         free_cpumask_var(buffer->cpumask);
1736
1737  fail_free_buffer:
1738         kfree(buffer);
1739         return NULL;
1740 }
1741 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1742
1743 /**
1744  * ring_buffer_free - free a ring buffer.
1745  * @buffer: the buffer to free.
1746  */
1747 void
1748 ring_buffer_free(struct trace_buffer *buffer)
1749 {
1750         int cpu;
1751
1752         cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1753
1754         irq_work_sync(&buffer->irq_work.work);
1755
1756         for_each_buffer_cpu(buffer, cpu)
1757                 rb_free_cpu_buffer(buffer->buffers[cpu]);
1758
1759         kfree(buffer->buffers);
1760         free_cpumask_var(buffer->cpumask);
1761
1762         kfree(buffer);
1763 }
1764 EXPORT_SYMBOL_GPL(ring_buffer_free);
1765
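     /* Replace the clock function used to timestamp events in @buffer */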
1766 void ring_buffer_set_clock(struct trace_buffer *buffer,
1767                            u64 (*clock)(void))
1768 {
1769         buffer->clock = clock;
1770 }
1771
1772 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
1773 {
1774         buffer->time_stamp_abs = abs;
1775 }
1776
1777 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
1778 {
1779         return buffer->time_stamp_abs;
1780 }
1781
1782 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1783
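     /*
      * The write and entries counters share their word with an updater
      * count in the bits above RB_WRITE_MASK; these helpers mask that off
      * and return just the write offset and entry count for the page.
      */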
1784 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1785 {
1786         return local_read(&bpage->entries) & RB_WRITE_MASK;
1787 }
1788
1789 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1790 {
1791         return local_read(&bpage->write) & RB_WRITE_MASK;
1792 }
1793
1794 static bool
1795 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1796 {
1797         struct list_head *tail_page, *to_remove, *next_page;
1798         struct buffer_page *to_remove_page, *tmp_iter_page;
1799         struct buffer_page *last_page, *first_page;
1800         unsigned long nr_removed;
1801         unsigned long head_bit;
1802         int page_entries;
1803
1804         head_bit = 0;
1805
1806         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1807         atomic_inc(&cpu_buffer->record_disabled);
1808         /*
1809          * We don't race with the readers since we have acquired the reader
1810          * lock. We also don't race with writers after disabling recording.
1811          * This makes it easy to figure out the first and the last page to be
1812          * removed from the list. We unlink all the pages in between including
1813          * the first and last pages. This is done in a busy loop so that we
1814          * the first and last pages. This is done in a busy loop so that we
1815          * lose as few trace entries as possible.
1816          */
1817         tail_page = &cpu_buffer->tail_page->list;
1818
1819         /*
1820          * The tail page might be on the reader page; in that case remove
1821          * pages starting from the next page in the ring buffer.
1822          */
1823         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1824                 tail_page = rb_list_head(tail_page->next);
1825         to_remove = tail_page;
1826
1827         /* start of pages to remove */
1828         first_page = list_entry(rb_list_head(to_remove->next),
1829                                 struct buffer_page, list);
1830
1831         for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1832                 to_remove = rb_list_head(to_remove)->next;
1833                 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1834         }
1835         /* Read iterators need to reset themselves when some pages are removed */
1836         cpu_buffer->pages_removed += nr_removed;
1837
1838         next_page = rb_list_head(to_remove)->next;
1839
1840         /*
1841          * Now we remove all pages between tail_page and next_page.
1842          * Make sure that the head_bit value is preserved for the
1843          * next page.
1844          */
1845         tail_page->next = (struct list_head *)((unsigned long)next_page |
1846                                                 head_bit);
1847         next_page = rb_list_head(next_page);
1848         next_page->prev = tail_page;
1849
1850         /* make sure pages points to a valid page in the ring buffer */
1851         cpu_buffer->pages = next_page;
1852
1853         /* update head page */
1854         if (head_bit)
1855                 cpu_buffer->head_page = list_entry(next_page,
1856                                                 struct buffer_page, list);
1857
1858         /* pages are removed, resume tracing and then free the pages */
1859         atomic_dec(&cpu_buffer->record_disabled);
1860         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1861
1862         RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1863
1864         /* last buffer page to remove */
1865         last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1866                                 list);
1867         tmp_iter_page = first_page;
1868
1869         do {
1870                 cond_resched();
1871
1872                 to_remove_page = tmp_iter_page;
1873                 rb_inc_page(&tmp_iter_page);
1874
1875                 /* update the counters */
1876                 page_entries = rb_page_entries(to_remove_page);
1877                 if (page_entries) {
1878                         /*
1879                          * If something was added to this page, it was full
1880                          * since it is not the tail page. So we deduct the
1881                          * bytes consumed in ring buffer from here.
1882                          * Increment overrun to account for the lost events.
1883                          */
1884                         local_add(page_entries, &cpu_buffer->overrun);
1885                         local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
1886                         local_inc(&cpu_buffer->pages_lost);
1887                 }
1888
1889                 /*
1890                  * We have already removed references to this list item, just
1891                  * free up the buffer_page and its page
1892                  */
1893                 free_buffer_page(to_remove_page);
1894                 nr_removed--;
1895
1896         } while (to_remove_page != last_page);
1897
1898         RB_WARN_ON(cpu_buffer, nr_removed);
1899
1900         return nr_removed == 0;
1901 }
1902
1903 static bool
1904 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1905 {
1906         struct list_head *pages = &cpu_buffer->new_pages;
1907         unsigned long flags;
1908         bool success;
1909         int retries;
1910
1911         /* Can be called at early boot, where interrupts must not be enabled */
1912         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1913         /*
1914          * We are holding the reader lock, so the reader page won't be swapped
1915          * in the ring buffer. Now we are racing with the writer trying to
1916          * move head page and the tail page.
1917          * We are going to adapt the reader page update process where:
1918          * 1. We first splice the start and end of list of new pages between
1919          *    the head page and its previous page.
1920          * 2. We cmpxchg the prev_page->next to point from head page to the
1921          *    start of new pages list.
1922          * 3. Finally, we update the head->prev to the end of new list.
1923          *
1924          * We will try this process 10 times, to make sure that we don't keep
1925          * spinning.
1926          */
1927         retries = 10;
1928         success = false;
1929         while (retries--) {
1930                 struct list_head *head_page, *prev_page;
1931                 struct list_head *last_page, *first_page;
1932                 struct list_head *head_page_with_bit;
1933                 struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
1934
1935                 if (!hpage)
1936                         break;
1937                 head_page = &hpage->list;
1938                 prev_page = head_page->prev;
1939
1940                 first_page = pages->next;
1941                 last_page  = pages->prev;
1942
1943                 head_page_with_bit = (struct list_head *)
1944                                      ((unsigned long)head_page | RB_PAGE_HEAD);
1945
1946                 last_page->next = head_page_with_bit;
1947                 first_page->prev = prev_page;
1948
1949                 /* caution: head_page_with_bit gets updated on cmpxchg failure */
1950                 if (try_cmpxchg(&prev_page->next,
1951                                 &head_page_with_bit, first_page)) {
1952                         /*
1953                          * yay, we replaced the page pointer with our new list;
1954                          * now we just have to update the head page's prev
1955                          * pointer to point to the end of the list.
1956                          */
1957                         head_page->prev = last_page;
1958                         success = true;
1959                         break;
1960                 }
1961         }
1962
1963         if (success)
1964                 INIT_LIST_HEAD(pages);
1965         /*
1966          * If we weren't successful in adding the new pages, warn and stop
1967          * tracing.
1968          */
1969         RB_WARN_ON(cpu_buffer, !success);
1970         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1971
1972         /* free pages if they weren't inserted */
1973         if (!success) {
1974                 struct buffer_page *bpage, *tmp;
1975                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1976                                          list) {
1977                         list_del_init(&bpage->list);
1978                         free_buffer_page(bpage);
1979                 }
1980         }
1981         return success;
1982 }
1983
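     /*
      * Apply a pending size change: a positive nr_pages_to_update splices
      * the pre-allocated new_pages into the list, a negative one unlinks
      * and frees that many pages from it.
      */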
1984 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1985 {
1986         bool success;
1987
1988         if (cpu_buffer->nr_pages_to_update > 0)
1989                 success = rb_insert_pages(cpu_buffer);
1990         else
1991                 success = rb_remove_pages(cpu_buffer,
1992                                         -cpu_buffer->nr_pages_to_update);
1993
1994         if (success)
1995                 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1996 }
1997
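     /* Workqueue handler to resize a per-CPU buffer on its own CPU */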
1998 static void update_pages_handler(struct work_struct *work)
1999 {
2000         struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2001                         struct ring_buffer_per_cpu, update_pages_work);
2002         rb_update_pages(cpu_buffer);
2003         complete(&cpu_buffer->update_done);
2004 }
2005
2006 /**
2007  * ring_buffer_resize - resize the ring buffer
2008  * @buffer: the buffer to resize.
2009  * @size: the new size.
2010  * @cpu_id: the cpu buffer to resize
2011  *
2012  * Minimum size is 2 * buffer->subbuf_size.
2013  *
2014  * Returns 0 on success and < 0 on failure.
2015  */
2016 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2017                         int cpu_id)
2018 {
2019         struct ring_buffer_per_cpu *cpu_buffer;
2020         unsigned long nr_pages;
2021         int cpu, err;
2022
2023         /*
2024          * Always succeed at resizing a non-existent buffer:
2025          */
2026         if (!buffer)
2027                 return 0;
2028
2029         /* Make sure the requested buffer exists */
2030         if (cpu_id != RING_BUFFER_ALL_CPUS &&
2031             !cpumask_test_cpu(cpu_id, buffer->cpumask))
2032                 return 0;
2033
2034         nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2035
2036         /* we need a minimum of two pages */
2037         if (nr_pages < 2)
2038                 nr_pages = 2;
2039
2040         /* prevent another thread from changing buffer sizes */
2041         mutex_lock(&buffer->mutex);
2042         atomic_inc(&buffer->resizing);
2043
2044         if (cpu_id == RING_BUFFER_ALL_CPUS) {
2045                 /*
2046                  * Don't succeed if resizing is disabled, as a reader might be
2047                  * manipulating the ring buffer and expecting a sane state while
2048                  * this is true.
2049                  */
2050                 for_each_buffer_cpu(buffer, cpu) {
2051                         cpu_buffer = buffer->buffers[cpu];
2052                         if (atomic_read(&cpu_buffer->resize_disabled)) {
2053                                 err = -EBUSY;
2054                                 goto out_err_unlock;
2055                         }
2056                 }
2057
2058                 /* calculate the pages to update */
2059                 for_each_buffer_cpu(buffer, cpu) {
2060                         cpu_buffer = buffer->buffers[cpu];
2061
2062                         cpu_buffer->nr_pages_to_update = nr_pages -
2063                                                         cpu_buffer->nr_pages;
2064                         /*
2065                          * Nothing more to do when removing pages or when there is no update.
2066                          */
2067                         if (cpu_buffer->nr_pages_to_update <= 0)
2068                                 continue;
2069                         /*
2070                          * to add pages, make sure all new pages can be
2071                          * allocated without receiving ENOMEM
2072                          */
2073                         INIT_LIST_HEAD(&cpu_buffer->new_pages);
2074                         if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2075                                                 &cpu_buffer->new_pages)) {
2076                                 /* not enough memory for new pages */
2077                                 err = -ENOMEM;
2078                                 goto out_err;
2079                         }
2080
2081                         cond_resched();
2082                 }
2083
2084                 cpus_read_lock();
2085                 /*
2086                  * Fire off all the required work handlers
2087                  * We can't schedule on offline CPUs, but it's not necessary
2088                  * since we can change their buffer sizes without any race.
2089                  */
2090                 for_each_buffer_cpu(buffer, cpu) {
2091                         cpu_buffer = buffer->buffers[cpu];
2092                         if (!cpu_buffer->nr_pages_to_update)
2093                                 continue;
2094
2095                         /* Can't run something on an offline CPU. */
2096                         if (!cpu_online(cpu)) {
2097                                 rb_update_pages(cpu_buffer);
2098                                 cpu_buffer->nr_pages_to_update = 0;
2099                         } else {
2100                                 /* Run directly if possible. */
2101                                 migrate_disable();
2102                                 if (cpu != smp_processor_id()) {
2103                                         migrate_enable();
2104                                         schedule_work_on(cpu,
2105                                                          &cpu_buffer->update_pages_work);
2106                                 } else {
2107                                         update_pages_handler(&cpu_buffer->update_pages_work);
2108                                         migrate_enable();
2109                                 }
2110                         }
2111                 }
2112
2113                 /* wait for all the updates to complete */
2114                 for_each_buffer_cpu(buffer, cpu) {
2115                         cpu_buffer = buffer->buffers[cpu];
2116                         if (!cpu_buffer->nr_pages_to_update)
2117                                 continue;
2118
2119                         if (cpu_online(cpu))
2120                                 wait_for_completion(&cpu_buffer->update_done);
2121                         cpu_buffer->nr_pages_to_update = 0;
2122                 }
2123
2124                 cpus_read_unlock();
2125         } else {
2126                 cpu_buffer = buffer->buffers[cpu_id];
2127
2128                 if (nr_pages == cpu_buffer->nr_pages)
2129                         goto out;
2130
2131                 /*
2132                  * Don't succeed if resizing is disabled, as a reader might be
2133                  * manipulating the ring buffer and expecting a sane state while
2134                  * this is true.
2135                  */
2136                 if (atomic_read(&cpu_buffer->resize_disabled)) {
2137                         err = -EBUSY;
2138                         goto out_err_unlock;
2139                 }
2140
2141                 cpu_buffer->nr_pages_to_update = nr_pages -
2142                                                 cpu_buffer->nr_pages;
2143
2144                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2145                 if (cpu_buffer->nr_pages_to_update > 0 &&
2146                         __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2147                                             &cpu_buffer->new_pages)) {
2148                         err = -ENOMEM;
2149                         goto out_err;
2150                 }
2151
2152                 cpus_read_lock();
2153
2154                 /* Can't run something on an offline CPU. */
2155                 if (!cpu_online(cpu_id))
2156                         rb_update_pages(cpu_buffer);
2157                 else {
2158                         /* Run directly if possible. */
2159                         migrate_disable();
2160                         if (cpu_id == smp_processor_id()) {
2161                                 rb_update_pages(cpu_buffer);
2162                                 migrate_enable();
2163                         } else {
2164                                 migrate_enable();
2165                                 schedule_work_on(cpu_id,
2166                                                  &cpu_buffer->update_pages_work);
2167                                 wait_for_completion(&cpu_buffer->update_done);
2168                         }
2169                 }
2170
2171                 cpu_buffer->nr_pages_to_update = 0;
2172                 cpus_read_unlock();
2173         }
2174
2175  out:
2176         /*
2177          * The ring buffer resize can happen with the ring buffer
2178          * enabled, so that the update disturbs the tracing as little
2179          * as possible. But if the buffer is disabled, we do not need
2180          * to worry about that, and we can take the time to verify
2181          * that the buffer is not corrupt.
2182          */
2183         if (atomic_read(&buffer->record_disabled)) {
2184                 atomic_inc(&buffer->record_disabled);
2185                 /*
2186                  * Even though the buffer was disabled, we must make sure
2187                  * that it is truly disabled before calling rb_check_pages.
2188                  * There could have been a race between checking
2189                  * record_disabled and incrementing it.
2190                  */
2191                 synchronize_rcu();
2192                 for_each_buffer_cpu(buffer, cpu) {
2193                         cpu_buffer = buffer->buffers[cpu];
2194                         rb_check_pages(cpu_buffer);
2195                 }
2196                 atomic_dec(&buffer->record_disabled);
2197         }
2198
2199         atomic_dec(&buffer->resizing);
2200         mutex_unlock(&buffer->mutex);
2201         return 0;
2202
2203  out_err:
2204         for_each_buffer_cpu(buffer, cpu) {
2205                 struct buffer_page *bpage, *tmp;
2206
2207                 cpu_buffer = buffer->buffers[cpu];
2208                 cpu_buffer->nr_pages_to_update = 0;
2209
2210                 if (list_empty(&cpu_buffer->new_pages))
2211                         continue;
2212
2213                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2214                                         list) {
2215                         list_del_init(&bpage->list);
2216                         free_buffer_page(bpage);
2217                 }
2218         }
2219  out_err_unlock:
2220         atomic_dec(&buffer->resizing);
2221         mutex_unlock(&buffer->mutex);
2222         return err;
2223 }
2224 EXPORT_SYMBOL_GPL(ring_buffer_resize);
2225
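     /**
      * ring_buffer_change_overwrite - set or clear the overwrite flag
      * @buffer: the buffer to change
      * @val: non-zero sets RB_FL_OVERWRITE (overwrite oldest data when full),
      *       zero clears it (drop new data when full).
      */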
2226 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2227 {
2228         mutex_lock(&buffer->mutex);
2229         if (val)
2230                 buffer->flags |= RB_FL_OVERWRITE;
2231         else
2232                 buffer->flags &= ~RB_FL_OVERWRITE;
2233         mutex_unlock(&buffer->mutex);
2234 }
2235 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2236
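     /* Return the address of the event at @index bytes into the sub-buffer data */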
2237 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2238 {
2239         return bpage->page->data + index;
2240 }
2241
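     /* Return the next event to be read off the reader page */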
2242 static __always_inline struct ring_buffer_event *
2243 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
2244 {
2245         return __rb_page_index(cpu_buffer->reader_page,
2246                                cpu_buffer->reader_page->read);
2247 }
2248
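     /*
      * Return the event at the iterator's read position, copied into
      * iter->event so it stays stable even if the writer reuses the page;
      * on any sign of such a race the iterator resets to the page start.
      */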
2249 static struct ring_buffer_event *
2250 rb_iter_head_event(struct ring_buffer_iter *iter)
2251 {
2252         struct ring_buffer_event *event;
2253         struct buffer_page *iter_head_page = iter->head_page;
2254         unsigned long commit;
2255         unsigned length;
2256
2257         if (iter->head != iter->next_event)
2258                 return iter->event;
2259
2260         /*
2261          * When the writer goes across pages, it issues a cmpxchg which
2262          * is a mb(), which will synchronize with the rmb here.
2263          * (see rb_tail_page_update() and __rb_reserve_next())
2264          */
2265         commit = rb_page_commit(iter_head_page);
2266         smp_rmb();
2267
2268         /* An event needs to be at least 8 bytes in size */
2269         if (iter->head > commit - 8)
2270                 goto reset;
2271
2272         event = __rb_page_index(iter_head_page, iter->head);
2273         length = rb_event_length(event);
2274
2275         /*
2276          * READ_ONCE() doesn't work on functions and we don't want the
2277          * compiler doing any crazy optimizations with length.
2278          */
2279         barrier();
2280
2281         if ((iter->head + length) > commit || length > iter->event_size)
2282                 /* Writer corrupted the read? */
2283                 goto reset;
2284
2285         memcpy(iter->event, event, length);
2286         /*
2287          * If the page stamp is still the same after this rmb() then the
2288          * event was safely copied without the writer entering the page.
2289          */
2290         smp_rmb();
2291
2292         /* Make sure the page didn't change since we read this */
2293         if (iter->page_stamp != iter_head_page->page->time_stamp ||
2294             commit > rb_page_commit(iter_head_page))
2295                 goto reset;
2296
2297         iter->next_event = iter->head + length;
2298         return iter->event;
2299  reset:
2300         /* Reset to the beginning */
2301         iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2302         iter->head = 0;
2303         iter->next_event = 0;
2304         iter->missed_events = 1;
2305         return NULL;
2306 }
2307
2308 /* Size is determined by what has been committed */
2309 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2310 {
2311         return rb_page_commit(bpage);
2312 }
2313
2314 static __always_inline unsigned
2315 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2316 {
2317         return rb_page_commit(cpu_buffer->commit_page);
2318 }
2319
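     /*
      * Return the event's byte offset within its sub-buffer, measured from
      * the start of the data area (the sub-buffer header is excluded).
      */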
2320 static __always_inline unsigned
2321 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
2322 {
2323         unsigned long addr = (unsigned long)event;
2324
2325         addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
2326
2327         return addr - BUF_PAGE_HDR_SIZE;
2328 }
2329
2330 static void rb_inc_iter(struct ring_buffer_iter *iter)
2331 {
2332         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2333
2334         /*
2335          * The iterator could be on the reader page (it starts there).
2336          * But the head could have moved since the reader page
2337          * was found. Check for this case and assign the iterator
2338          * to the head page instead of next.
2339          */
2340         if (iter->head_page == cpu_buffer->reader_page)
2341                 iter->head_page = rb_set_head_page(cpu_buffer);
2342         else
2343                 rb_inc_page(&iter->head_page);
2344
2345         iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2346         iter->head = 0;
2347         iter->next_event = 0;
2348 }
2349
2350 /*
2351  * rb_handle_head_page - writer hit the head page
2352  *
2353  * Returns: +1 to retry page
2354  *           0 to continue
2355  *          -1 on error
2356  */
2357 static int
2358 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2359                     struct buffer_page *tail_page,
2360                     struct buffer_page *next_page)
2361 {
2362         struct buffer_page *new_head;
2363         int entries;
2364         int type;
2365         int ret;
2366
2367         entries = rb_page_entries(next_page);
2368
2369         /*
2370          * The hard part is here. We need to move the head
2371          * forward, and protect against both readers on
2372          * other CPUs and writers coming in via interrupts.
2373          */
2374         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2375                                        RB_PAGE_HEAD);
2376
2377         /*
2378          * type can be one of four:
2379          *  NORMAL - an interrupt already moved it for us
2380          *  HEAD   - we are the first to get here.
2381          *  UPDATE - we are the interrupt interrupting
2382          *           a current move.
2383          *  MOVED  - a reader on another CPU moved the next
2384          *           pointer to its reader page. Give up
2385          *           and try again.
2386          */
2387
2388         switch (type) {
2389         case RB_PAGE_HEAD:
2390                 /*
2391                  * We changed the head to UPDATE, thus
2392                  * it is our responsibility to update
2393                  * the counters.
2394                  */
2395                 local_add(entries, &cpu_buffer->overrun);
2396                 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
2397                 local_inc(&cpu_buffer->pages_lost);
2398
2399                 /*
2400                  * The entries will be zeroed out when we move the
2401                  * tail page.
2402                  */
2403
2404                 /* still more to do */
2405                 break;
2406
2407         case RB_PAGE_UPDATE:
2408                 /*
2409                  * This is an interrupt that interrupted the
2410                  * previous update. Still more to do.
2411                  */
2412                 break;
2413         case RB_PAGE_NORMAL:
2414                 /*
2415                  * An interrupt came in before the update
2416                  * and processed this for us.
2417                  * Nothing left to do.
2418                  */
2419                 return 1;
2420         case RB_PAGE_MOVED:
2421                 /*
2422                  * The reader is on another CPU and just did
2423                  * a swap with our next_page.
2424                  * Try again.
2425                  */
2426                 return 1;
2427         default:
2428                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2429                 return -1;
2430         }
2431
2432         /*
2433          * Now that we are here, the old head pointer is
2434          * set to UPDATE. This will keep the reader from
2435          * swapping the head page with the reader page.
2436          * The reader (on another CPU) will spin till
2437          * we are finished.
2438          *
2439          * We just need to protect against interrupts
2440          * doing the job. We will set the next pointer
2441          * to HEAD. After that, we set the old pointer
2442                  * to NORMAL, but only if it was HEAD before;
2443                  * otherwise we are an interrupt, and only
2444                  * want the outermost commit to reset it.
2445          */
2446         new_head = next_page;
2447         rb_inc_page(&new_head);
2448
2449         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2450                                     RB_PAGE_NORMAL);
2451
2452         /*
2453          * Valid returns are:
2454          *  HEAD   - an interrupt came in and already set it.
2455          *  NORMAL - One of two things:
2456          *            1) We really set it.
2457          *            2) A bunch of interrupts came in and moved
2458          *               the page forward again.
2459          */
2460         switch (ret) {
2461         case RB_PAGE_HEAD:
2462         case RB_PAGE_NORMAL:
2463                 /* OK */
2464                 break;
2465         default:
2466                 RB_WARN_ON(cpu_buffer, 1);
2467                 return -1;
2468         }
2469
2470         /*
2471          * It is possible that an interrupt came in,
2472          * set the head up, then more interrupts came in
2473          * and moved it again. When we get back here,
2474          * the page would have been set to NORMAL but we
2475          * just set it back to HEAD.
2476          *
2477          * How do you detect this? Well, if that happened
2478          * the tail page would have moved.
2479          */
2480         if (ret == RB_PAGE_NORMAL) {
2481                 struct buffer_page *buffer_tail_page;
2482
2483                 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2484                 /*
2485                  * If the tail had moved past next, then we need
2486                  * to reset the pointer.
2487                  */
2488                 if (buffer_tail_page != tail_page &&
2489                     buffer_tail_page != next_page)
2490                         rb_head_page_set_normal(cpu_buffer, new_head,
2491                                                 next_page,
2492                                                 RB_PAGE_HEAD);
2493         }
2494
2495         /*
2496          * If this was the outermost commit (the one that
2497          * changed the original pointer from HEAD to UPDATE),
2498          * then it is up to us to reset it to NORMAL.
2499          */
2500         if (type == RB_PAGE_HEAD) {
2501                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2502                                               tail_page,
2503                                               RB_PAGE_UPDATE);
2504                 if (RB_WARN_ON(cpu_buffer,
2505                                ret != RB_PAGE_UPDATE))
2506                         return -1;
2507         }
2508
2509         return 0;
2510 }
2511
2512 static inline void
2513 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2514               unsigned long tail, struct rb_event_info *info)
2515 {
2516         unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
2517         struct buffer_page *tail_page = info->tail_page;
2518         struct ring_buffer_event *event;
2519         unsigned long length = info->length;
2520
2521         /*
2522          * Only the event that crossed the page boundary
2523          * must fill the old tail_page with padding.
2524          */
2525         if (tail >= bsize) {
2526                 /*
2527                  * If the page was filled, then we still need
2528                  * to update the real_end. Reset it to zero
2529                  * and the reader will ignore it.
2530                  */
2531                 if (tail == bsize)
2532                         tail_page->real_end = 0;
2533
2534                 local_sub(length, &tail_page->write);
2535                 return;
2536         }
2537
2538         event = __rb_page_index(tail_page, tail);
2539
2540         /*
2541          * Save the original length to the metadata.
2542          * This will be used by the reader to add the lost
2543          * event counter.
2544          */
2545         tail_page->real_end = tail;
2546
2547         /*
2548          * If this event is bigger than the minimum size, then
2549          * we need to be careful that we don't subtract the
2550          * write counter enough to allow another writer to slip
2551          * in on this page.
2552          * We put in a discarded commit instead, to make sure
2553          * that this space is not used again, and this space will
2554          * not be accounted into 'entries_bytes'.
2555          *
2556          * If we are less than the minimum size, we don't need to
2557          * worry about it.
2558          */
2559         if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
2560                 /* No room for any events */
2561
2562                 /* Mark the rest of the page with padding */
2563                 rb_event_set_padding(event);
2564
2565                 /* Make sure the padding is visible before the write update */
2566                 smp_wmb();
2567
2568                 /* Set the write back to the previous setting */
2569                 local_sub(length, &tail_page->write);
2570                 return;
2571         }
2572
2573         /* Put in a discarded event */
2574         event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
2575         event->type_len = RINGBUF_TYPE_PADDING;
2576         /* time delta must be non zero */
2577         event->time_delta = 1;
2578
2579         /* account for padding bytes */
2580         local_add(bsize - tail, &cpu_buffer->entries_bytes);
2581
2582         /* Make sure the padding is visible before the tail_page->write update */
2583         smp_wmb();
2584
2585         /* Set write to end of buffer */
2586         length = (tail + length) - bsize;
2587         local_sub(length, &tail_page->write);
2588 }
2589
2590 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2591
2592 /*
2593  * This is the slow path, force gcc not to inline it.
2594  */
2595 static noinline struct ring_buffer_event *
2596 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2597              unsigned long tail, struct rb_event_info *info)
2598 {
2599         struct buffer_page *tail_page = info->tail_page;
2600         struct buffer_page *commit_page = cpu_buffer->commit_page;
2601         struct trace_buffer *buffer = cpu_buffer->buffer;
2602         struct buffer_page *next_page;
2603         int ret;
2604
2605         next_page = tail_page;
2606
2607         rb_inc_page(&next_page);
2608
2609         /*
2610          * If for some reason, we had an interrupt storm that made
2611          * it all the way around the buffer, bail, and warn
2612          * about it.
2613          */
2614         if (unlikely(next_page == commit_page)) {
2615                 local_inc(&cpu_buffer->commit_overrun);
2616                 goto out_reset;
2617         }
2618
2619         /*
2620          * This is where the fun begins!
2621          *
2622          * We are fighting against races between a reader that
2623          * could be on another CPU trying to swap its reader
2624          * page with the buffer head.
2625          *
2626          * We are also fighting against interrupts coming in and
2627          * moving the head or tail on us as well.
2628          *
2629          * If the next page is the head page then we have filled
2630          * the buffer, unless the commit page is still on the
2631          * reader page.
2632          */
2633         if (rb_is_head_page(next_page, &tail_page->list)) {
2634
2635                 /*
2636                  * If the commit is not on the reader page, then
2637                  * move the head page.
2638                  */
2639                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2640                         /*
2641                          * If we are not in overwrite mode,
2642                          * this is easy, just stop here.
2643                          */
2644                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
2645                                 local_inc(&cpu_buffer->dropped_events);
2646                                 goto out_reset;
2647                         }
2648
2649                         ret = rb_handle_head_page(cpu_buffer,
2650                                                   tail_page,
2651                                                   next_page);
2652                         if (ret < 0)
2653                                 goto out_reset;
2654                         if (ret)
2655                                 goto out_again;
2656                 } else {
2657                         /*
2658                          * We need to be careful here too. The
2659                          * commit page could still be on the reader
2660                          * page. We could have a small buffer, and
2661                          * have filled up the buffer with events
2662                          * from interrupts and such, and wrapped.
2663                          *
2664                          * Note, if the tail page is also on the
2665                          * reader_page, we let it move out.
2666                          */
2667                         if (unlikely((cpu_buffer->commit_page !=
2668                                       cpu_buffer->tail_page) &&
2669                                      (cpu_buffer->commit_page ==
2670                                       cpu_buffer->reader_page))) {
2671                                 local_inc(&cpu_buffer->commit_overrun);
2672                                 goto out_reset;
2673                         }
2674                 }
2675         }
2676
2677         rb_tail_page_update(cpu_buffer, tail_page, next_page);
2678
2679  out_again:
2680
2681         rb_reset_tail(cpu_buffer, tail, info);
2682
2683         /* Commit what we have for now. */
2684         rb_end_commit(cpu_buffer);
2685         /* rb_end_commit() decs committing */
2686         local_inc(&cpu_buffer->committing);
2687
2688         /* fail and let the caller try again */
2689         return ERR_PTR(-EAGAIN);
2690
2691  out_reset:
2692         /* reset write */
2693         rb_reset_tail(cpu_buffer, tail, info);
2694
2695         return NULL;
2696 }
2697
2698 /* Slow path */
2699 static struct ring_buffer_event *
2700 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2701                   struct ring_buffer_event *event, u64 delta, bool abs)
2702 {
2703         if (abs)
2704                 event->type_len = RINGBUF_TYPE_TIME_STAMP;
2705         else
2706                 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2707
2708         /* Not the first event on the page, or not delta? */
2709         if (abs || rb_event_index(cpu_buffer, event)) {
2710                 event->time_delta = delta & TS_MASK;
2711                 event->array[0] = delta >> TS_SHIFT;
2712         } else {
2713                 /* nope, just zero it */
2714                 event->time_delta = 0;
2715                 event->array[0] = 0;
2716         }
2717
2718         return skip_time_extend(event);
2719 }
2720
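     /* Without unstable sched clock support, the clock is assumed to be stable */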
2721 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2722 static inline bool sched_clock_stable(void)
2723 {
2724         return true;
2725 }
2726 #endif
2727
2728 static void
2729 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2730                    struct rb_event_info *info)
2731 {
2732         u64 write_stamp;
2733
2734         WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2735                   (unsigned long long)info->delta,
2736                   (unsigned long long)info->ts,
2737                   (unsigned long long)info->before,
2738                   (unsigned long long)info->after,
2739                   (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
2740                   sched_clock_stable() ? "" :
2741                   "If you just came from a suspend/resume,\n"
2742                   "please switch to the trace global clock:\n"
2743                   "  echo global > /sys/kernel/tracing/trace_clock\n"
2744                   "or add trace_clock=global to the kernel command line\n");
2745 }
2746
2747 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2748                                       struct ring_buffer_event **event,
2749                                       struct rb_event_info *info,
2750                                       u64 *delta,
2751                                       unsigned int *length)
2752 {
2753         bool abs = info->add_timestamp &
2754                 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2755
2756         if (unlikely(info->delta > (1ULL << 59))) {
2757                 /*
2758                  * Some timers can use more than 59 bits, and when a timestamp
2759                  * is added to the buffer, it will lose those bits.
2760                  */
2761                 if (abs && (info->ts & TS_MSB)) {
2762                         info->delta &= ABS_TS_MASK;
2763
2764                 /* did the clock go backwards? */
2765                 } else if (info->before == info->after && info->before > info->ts) {
2766                         /* not interrupted */
2767                         static int once;
2768
2769                         /*
2770                          * This is possible with a recalibration of the TSC.
2771                          * Do not produce a call stack, but just report it.
2772                          */
2773                         if (!once) {
2774                                 once++;
2775                                 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2776                                         info->before, info->ts);
2777                         }
2778                 } else
2779                         rb_check_timestamp(cpu_buffer, info);
2780                 if (!abs)
2781                         info->delta = 0;
2782         }
2783         *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
2784         *length -= RB_LEN_TIME_EXTEND;
2785         *delta = 0;
2786 }
2787
2788 /**
2789  * rb_update_event - update event type and data
2790  * @cpu_buffer: The per cpu buffer of the @event
2791  * @event: the event to update
2792  * @info: The info to update the @event with (contains length and delta)
2793  *
2794  * Update the type and data fields of the @event. The length
2795  * is the actual size that is written to the ring buffer,
2796  * and with this, we can determine what to place into the
2797  * data field.
2798  */
2799 static void
2800 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2801                 struct ring_buffer_event *event,
2802                 struct rb_event_info *info)
2803 {
2804         unsigned length = info->length;
2805         u64 delta = info->delta;
2806         unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2807
2808         if (!WARN_ON_ONCE(nest >= MAX_NEST))
2809                 cpu_buffer->event_stamp[nest] = info->ts;
2810
2811         /*
2812          * If we need to add a timestamp, then we
2813          * add it to the start of the reserved space.
2814          */
2815         if (unlikely(info->add_timestamp))
2816                 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2817
2818         event->time_delta = delta;
2819         length -= RB_EVNT_HDR_SIZE;
2820         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2821                 event->type_len = 0;
2822                 event->array[0] = length;
2823         } else
2824                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2825 }
2826
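     /*
      * Compute the total on-buffer size needed for a payload of @length
      * bytes, including the event header, alignment, and the extra word
      * used when the length must be stored in array[0].
      */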
2827 static unsigned rb_calculate_event_length(unsigned length)
2828 {
2829         struct ring_buffer_event event; /* Used only for sizeof array */
2830
2831         /* zero length can cause confusion */
2832         if (!length)
2833                 length++;
2834
2835         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2836                 length += sizeof(event.array[0]);
2837
2838         length += RB_EVNT_HDR_SIZE;
2839         length = ALIGN(length, RB_ARCH_ALIGNMENT);
2840
2841         /*
2842          * In case the time delta is larger than the 27 bits for it
2843          * in the header, we need to add a timestamp. If another
2844          * event comes in when trying to discard this one to increase
2845          * the length, then the timestamp will be added in the allocated
2846          * space of this event. If length is bigger than the size needed
2847          * for the TIME_EXTEND, then padding has to be used. The events
2848          * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2849          * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2850          * As length is a multiple of 4, we only need to worry if it
2851          * is 12 (RB_LEN_TIME_EXTEND + 4).
2852          */
2853         if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2854                 length += RB_ALIGNMENT;
2855
2856         return length;
2857 }
2858
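     /*
      * Try to give an event's space back to the page: this only succeeds
      * when the event is still the last thing written on the tail page,
      * so the write index can be wound back with a cmpxchg.
      */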
2859 static inline bool
2860 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2861                   struct ring_buffer_event *event)
2862 {
2863         unsigned long new_index, old_index;
2864         struct buffer_page *bpage;
2865         unsigned long addr;
2866
2867         new_index = rb_event_index(cpu_buffer, event);
2868         old_index = new_index + rb_event_ts_length(event);
2869         addr = (unsigned long)event;
2870         addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
2871
2872         bpage = READ_ONCE(cpu_buffer->tail_page);
2873
2874         /*
2875          * Make sure the tail_page is still the same and
2876          * the next write location is the end of this event
2877          */
2878         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2879                 unsigned long write_mask =
2880                         local_read(&bpage->write) & ~RB_WRITE_MASK;
2881                 unsigned long event_length = rb_event_length(event);
2882
2883                 /*
2884                  * Make the before_stamp different from the write_stamp
2885                  * so that the next event adds an absolute timestamp
2886                  * value and does not rely on the saved write stamp, which
2887                  * is now going to be bogus.
2888                  *
2889                  * By setting the before_stamp to zero, the next event
2890                  * is not going to use the write_stamp and will instead
2891                  * create an absolute timestamp. This means there's no
2892                  * reason to update the write_stamp!
2893                  */
2894                 rb_time_set(&cpu_buffer->before_stamp, 0);
2895
2896                 /*
2897                  * If an event were to come in now, it would see that the
2898                  * write_stamp and the before_stamp are different, and assume
2899                  * that this event just added itself before updating
2900                  * the write stamp. The interrupting event will fix the
2901                  * write stamp for us, and use an absolute timestamp.
2902                  */
2903
2904                 /*
2905                  * This is on the tail page. It is possible that
2906                  * a write could come in and move the tail page
2907                  * and write to the next page. That is fine
2908                  * because we just shorten what is on this page.
2909                  */
2910                 old_index += write_mask;
2911                 new_index += write_mask;
2912
2913                 /* caution: old_index gets updated on cmpxchg failure */
2914                 if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
2915                         /* update counters */
2916                         local_sub(event_length, &cpu_buffer->entries_bytes);
2917                         return true;
2918                 }
2919         }
2920
2921         /* could not discard */
2922         return false;
2923 }
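/*
 * A stand-alone model of the rewind above, assuming a toy flat buffer
 * with a single atomic write index (no pages, no write mask). An event
 * can only be "unreserved" while the write index still points at its
 * end; if anything was reserved behind it, the cmpxchg fails and the
 * caller must fall back to padding the event instead.
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdatomic.h>
#include <stdbool.h>

struct toy_buf {
	atomic_ulong write;	/* next free offset in the buffer */
};

static bool toy_try_discard(struct toy_buf *buf,
			    unsigned long ev_start, unsigned long ev_len)
{
	unsigned long expected = ev_start + ev_len;

	/* Succeeds only if no one reserved space after this event */
	return atomic_compare_exchange_strong(&buf->write, &expected, ev_start);
}
#endif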
2924
2925 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2926 {
2927         local_inc(&cpu_buffer->committing);
2928         local_inc(&cpu_buffer->commits);
2929 }
2930
2931 static __always_inline void
2932 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2933 {
2934         unsigned long max_count;
2935
2936         /*
2937          * We only race with interrupts and NMIs on this CPU.
2938          * If we own the commit event, then we can commit
2939          * all others that interrupted us, since the interruptions
2940          * are in stack format (they finish before they come
2941          * back to us). This allows us to do a simple loop to
2942          * assign the commit to the tail.
2943          */
2944  again:
2945         max_count = cpu_buffer->nr_pages * 100;
2946
2947         while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2948                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2949                         return;
2950                 if (RB_WARN_ON(cpu_buffer,
2951                                rb_is_reader_page(cpu_buffer->tail_page)))
2952                         return;
2953                 /*
2954                  * No need for a memory barrier here, as the update
2955                  * of the tail_page did it for this page.
2956                  */
2957                 local_set(&cpu_buffer->commit_page->page->commit,
2958                           rb_page_write(cpu_buffer->commit_page));
2959                 rb_inc_page(&cpu_buffer->commit_page);
2960                 /* add barrier to keep gcc from optimizing too much */
2961                 barrier();
2962         }
2963         while (rb_commit_index(cpu_buffer) !=
2964                rb_page_write(cpu_buffer->commit_page)) {
2965
2966                 /* Make sure the readers see the content of what is committed. */
2967                 smp_wmb();
2968                 local_set(&cpu_buffer->commit_page->page->commit,
2969                           rb_page_write(cpu_buffer->commit_page));
2970                 RB_WARN_ON(cpu_buffer,
2971                            local_read(&cpu_buffer->commit_page->page->commit) &
2972                            ~RB_WRITE_MASK);
2973                 barrier();
2974         }
2975
2976         /* again, keep gcc from optimizing */
2977         barrier();
2978
2979         /*
2980          * If an interrupt came in just after the first while loop
2981          * and pushed the tail page forward, we will be left with
2982          * a dangling commit that will never go forward.
2983          */
2984         if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2985                 goto again;
2986 }
2987
2988 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2989 {
2990         unsigned long commits;
2991
2992         if (RB_WARN_ON(cpu_buffer,
2993                        !local_read(&cpu_buffer->committing)))
2994                 return;
2995
2996  again:
2997         commits = local_read(&cpu_buffer->commits);
2998         /* synchronize with interrupts */
2999         barrier();
3000         if (local_read(&cpu_buffer->committing) == 1)
3001                 rb_set_commit_to_write(cpu_buffer);
3002
3003         local_dec(&cpu_buffer->committing);
3004
3005         /* synchronize with interrupts */
3006         barrier();
3007
3008         /*
3009          * Need to account for interrupts coming in between the
3010          * updating of the commit page and the clearing of the
3011          * committing counter.
3012          */
3013         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3014             !local_read(&cpu_buffer->committing)) {
3015                 local_inc(&cpu_buffer->committing);
3016                 goto again;
3017         }
3018 }
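/*
 * A condensed single-threaded model of the bookkeeping above, under the
 * assumption that interrupts behave like nested function calls:
 * "committing" counts how deep the reserve/commit nesting currently is,
 * "commits" counts reserves ever started. Only the outermost writer
 * (committing == 1) publishes the commit index, and it re-checks
 * "commits" afterwards to catch a late interrupt.
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdio.h>

static unsigned long committing, commits, commit_index, write_index;

static void toy_start_commit(void)
{
	committing++;
	commits++;
}

static void toy_end_commit(void)
{
	unsigned long seen;
again:
	seen = commits;
	if (committing == 1)
		commit_index = write_index;	/* publish all writes */
	committing--;
	/*
	 * An interrupt landing between the publish and the decrement
	 * would have bumped "commits"; if so, and no one else is still
	 * committing, redo the publish so its data is not left dangling.
	 */
	if (commits != seen && committing == 0) {
		committing++;
		goto again;
	}
}

int main(void)
{
	toy_start_commit();
	write_index += 24;		/* pretend an event was written */
	toy_end_commit();
	printf("commit=%lu write=%lu\n", commit_index, write_index);
	return 0;
}
#endif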
3019
3020 static inline void rb_event_discard(struct ring_buffer_event *event)
3021 {
3022         if (extended_time(event))
3023                 event = skip_time_extend(event);
3024
3025         /* array[0] holds the actual length for the discarded event */
3026         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3027         event->type_len = RINGBUF_TYPE_PADDING;
3028         /* time delta must be non zero */
3029         if (!event->time_delta)
3030                 event->time_delta = 1;
3031 }
3032
3033 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3034 {
3035         local_inc(&cpu_buffer->entries);
3036         rb_end_commit(cpu_buffer);
3037 }
3038
3039 static __always_inline void
3040 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3041 {
3042         if (buffer->irq_work.waiters_pending) {
3043                 buffer->irq_work.waiters_pending = false;
3044                 /* irq_work_queue() supplies its own memory barriers */
3045                 irq_work_queue(&buffer->irq_work.work);
3046         }
3047
3048         if (cpu_buffer->irq_work.waiters_pending) {
3049                 cpu_buffer->irq_work.waiters_pending = false;
3050                 /* irq_work_queue() supplies its own memory barriers */
3051                 irq_work_queue(&cpu_buffer->irq_work.work);
3052         }
3053
3054         if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3055                 return;
3056
3057         if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3058                 return;
3059
3060         if (!cpu_buffer->irq_work.full_waiters_pending)
3061                 return;
3062
3063         cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3064
3065         if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
3066                 return;
3067
3068         cpu_buffer->irq_work.wakeup_full = true;
3069         cpu_buffer->irq_work.full_waiters_pending = false;
3070         /* irq_work_queue() supplies its own memory barriers */
3071         irq_work_queue(&cpu_buffer->irq_work.work);
3072 }
3073
3074 #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3075 # define do_ring_buffer_record_recursion()      \
3076         do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3077 #else
3078 # define do_ring_buffer_record_recursion() do { } while (0)
3079 #endif
3080
3081 /*
3082  * The lock and unlock are done within a preempt disable section.
3083  * The current_context per_cpu variable can only be modified
3084  * by the current task between lock and unlock. But it can
3085  * be modified more than once via an interrupt. To pass this
3086  * information from the lock to the unlock without having to
3087  * access the 'in_interrupt()' functions again (which do show
3088  * a bit of overhead in something as critical as function tracing),
3089  * we use a bitmask trick.
3090  *
3091  *  bit 1 =  NMI context
3092  *  bit 2 =  IRQ context
3093  *  bit 3 =  SoftIRQ context
3094  *  bit 4 =  normal context.
3095  *
3096  * This works because this is the order of contexts that can
3097  * preempt other contexts. A SoftIRQ never preempts an IRQ
3098  * context.
3099  *
3100  * When the context is determined, the corresponding bit is
3101  * checked and set (if it was set, then a recursion of that context
3102  * happened).
3103  *
3104  * On unlock, we need to clear this bit. To do so, just subtract
3105  * 1 from the current_context and AND it to itself.
3106  *
3107  * (binary)
3108  *  101 - 1 = 100
3109  *  101 & 100 = 100 (clearing bit zero)
3110  *
3111  *  1010 - 1 = 1001
3112  *  1010 & 1001 = 1000 (clearing bit 1)
3113  *
3114  * The least significant bit can be cleared this way, and it
3115  * just so happens that it is the same bit corresponding to
3116  * the current context.
3117  *
3118  * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3119  * is set when a recursion is detected at the current context, and if
3120  * the TRANSITION bit is already set, it will fail the recursion.
3121  * This is needed because there's a lag between the changing of
3122  * interrupt context and updating the preempt count. In this case,
3123  * a false positive will be found. To handle this, one extra recursion
3124  * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3125  * bit is already set, then it is considered a recursion and the function
3126  * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3127  *
3128  * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3129  * to be cleared, even if it wasn't the context that set it. That is,
3130  * if an interrupt comes in while NORMAL bit is set and the ring buffer
3131  * is called before preempt_count() is updated, since the check will
3132  * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3133  * NMI then comes in, it will set the NMI bit, but when the NMI code
3134  * does the trace_recursive_unlock() it will clear the TRANSITION bit
3135  * and leave the NMI bit set. But this is fine, because the interrupt
3136  * code that set the TRANSITION bit will then clear the NMI bit when it
3137  * calls trace_recursive_unlock(). If another NMI comes in, it will
3138  * set the TRANSITION bit and continue.
3139  *
3140  * Note: The TRANSITION bit only handles a single transition between contexts.
3141  */
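/*
 * A quick demonstration of the "subtract 1 and AND" unlock trick the
 * comment above describes: it clears exactly the lowest set bit, which
 * is always the bit of the most recently entered context.
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdio.h>

int main(void)
{
	unsigned int val = 0xa;		/* binary 1010: two contexts held */

	printf("before: %#x\n", val);
	val &= val - 1;			/* 1010 & 1001 = 1000: bit 1 cleared */
	printf("after:  %#x\n", val);
	return 0;
}
#endif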
3142
3143 static __always_inline bool
3144 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3145 {
3146         unsigned int val = cpu_buffer->current_context;
3147         int bit = interrupt_context_level();
3148
3149         bit = RB_CTX_NORMAL - bit;
3150
3151         if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3152                 /*
3153                  * It is possible that this was called by transitioning
3154                  * between interrupt context, and preempt_count() has not
3155                  * been updated yet. In this case, use the TRANSITION bit.
3156                  */
3157                 bit = RB_CTX_TRANSITION;
3158                 if (val & (1 << (bit + cpu_buffer->nest))) {
3159                         do_ring_buffer_record_recursion();
3160                         return true;
3161                 }
3162         }
3163
3164         val |= (1 << (bit + cpu_buffer->nest));
3165         cpu_buffer->current_context = val;
3166
3167         return false;
3168 }
3169
3170 static __always_inline void
3171 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3172 {
3173         cpu_buffer->current_context &=
3174                 cpu_buffer->current_context - (1 << cpu_buffer->nest);
3175 }
3176
3177 /* The recursive locking above uses 5 bits */
3178 #define NESTED_BITS 5
3179
3180 /**
3181  * ring_buffer_nest_start - Allow tracing while nested
3182  * @buffer: The ring buffer to modify
3183  *
3184  * The ring buffer has a safety mechanism to prevent recursion.
3185  * But there may be a case where a trace needs to be done while
3186  * tracing something else. In this case, calling this function
3187  * will allow this function to nest within a currently active
3188  * ring_buffer_lock_reserve().
3189  *
3190  * Call this function before calling another ring_buffer_lock_reserve() and
3191  * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3192  */
3193 void ring_buffer_nest_start(struct trace_buffer *buffer)
3194 {
3195         struct ring_buffer_per_cpu *cpu_buffer;
3196         int cpu;
3197
3198         /* Enabled by ring_buffer_nest_end() */
3199         preempt_disable_notrace();
3200         cpu = raw_smp_processor_id();
3201         cpu_buffer = buffer->buffers[cpu];
3202         /* This is the shift value for the above recursive locking */
3203         cpu_buffer->nest += NESTED_BITS;
3204 }
3205
3206 /**
3207  * ring_buffer_nest_end - End a section that allows tracing while nested
3208  * @buffer: The ring buffer to modify
3209  *
3210  * Must be called after ring_buffer_nest_start() and after the
3211  * ring_buffer_unlock_commit().
3212  */
3213 void ring_buffer_nest_end(struct trace_buffer *buffer)
3214 {
3215         struct ring_buffer_per_cpu *cpu_buffer;
3216         int cpu;
3217
3218         /* disabled by ring_buffer_nest_start() */
3219         cpu = raw_smp_processor_id();
3220         cpu_buffer = buffer->buffers[cpu];
3221         /* This is the shift value for the above recursive locking */
3222         cpu_buffer->nest -= NESTED_BITS;
3223         preempt_enable_notrace();
3224 }
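/*
 * A hedged usage sketch for the pair of functions above: an event that
 * must be written from within an already-reserved event is bracketed by
 * nest_start/nest_end so the recursion detector grants it a fresh set
 * of context bits. emit_nested_event() and its parameters are
 * hypothetical, not kernel API.
 */
#if 0	/* illustrative sketch, not part of the build */
static void emit_nested_event(struct trace_buffer *buffer,
			      void *data, unsigned long len)
{
	struct ring_buffer_event *event;

	ring_buffer_nest_start(buffer);
	event = ring_buffer_lock_reserve(buffer, len);
	if (event) {
		memcpy(ring_buffer_event_data(event), data, len);
		ring_buffer_unlock_commit(buffer);
	}
	ring_buffer_nest_end(buffer);
}
#endif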
3225
3226 /**
3227  * ring_buffer_unlock_commit - commit a reserved event
3228  * @buffer: The buffer to commit to
3229  *
3230  * This commits the data to the ring buffer, and releases any locks held.
3231  *
3232  * Must be paired with ring_buffer_lock_reserve.
3233  */
3234 int ring_buffer_unlock_commit(struct trace_buffer *buffer)
3235 {
3236         struct ring_buffer_per_cpu *cpu_buffer;
3237         int cpu = raw_smp_processor_id();
3238
3239         cpu_buffer = buffer->buffers[cpu];
3240
3241         rb_commit(cpu_buffer);
3242
3243         rb_wakeups(buffer, cpu_buffer);
3244
3245         trace_recursive_unlock(cpu_buffer);
3246
3247         preempt_enable_notrace();
3248
3249         return 0;
3250 }
3251 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
3252
3253 /* Special value to validate all deltas on a page. */
3254 #define CHECK_FULL_PAGE         1L
3255
3256 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
3257
3258 static const char *show_irq_str(int bits)
3259 {
3260         const char *type[] = {
3261                 ".",    // 0
3262                 "s",    // 1
3263                 "h",    // 2
3264                 "Hs",   // 3
3265                 "n",    // 4
3266                 "Ns",   // 5
3267                 "Nh",   // 6
3268                 "NHs",  // 7
3269         };
3270
3271         return type[bits];
3272 }
3273
3274 /* Assume this is a trace event */
3275 static const char *show_flags(struct ring_buffer_event *event)
3276 {
3277         struct trace_entry *entry;
3278         int bits = 0;
3279
3280         if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3281                 return "X";
3282
3283         entry = ring_buffer_event_data(event);
3284
3285         if (entry->flags & TRACE_FLAG_SOFTIRQ)
3286                 bits |= 1;
3287
3288         if (entry->flags & TRACE_FLAG_HARDIRQ)
3289                 bits |= 2;
3290
3291         if (entry->flags & TRACE_FLAG_NMI)
3292                 bits |= 4;
3293
3294         return show_irq_str(bits);
3295 }
3296
3297 static const char *show_irq(struct ring_buffer_event *event)
3298 {
3299         struct trace_entry *entry;
3300
3301         if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3302                 return "";
3303
3304         entry = ring_buffer_event_data(event);
3305         if (entry->flags & TRACE_FLAG_IRQS_OFF)
3306                 return "d";
3307         return "";
3308 }
3309
3310 static const char *show_interrupt_level(void)
3311 {
3312         unsigned long pc = preempt_count();
3313         unsigned char level = 0;
3314
3315         if (pc & SOFTIRQ_OFFSET)
3316                 level |= 1;
3317
3318         if (pc & HARDIRQ_MASK)
3319                 level |= 2;
3320
3321         if (pc & NMI_MASK)
3322                 level |= 4;
3323
3324         return show_irq_str(level);
3325 }
3326
3327 static void dump_buffer_page(struct buffer_data_page *bpage,
3328                              struct rb_event_info *info,
3329                              unsigned long tail)
3330 {
3331         struct ring_buffer_event *event;
3332         u64 ts, delta;
3333         int e;
3334
3335         ts = bpage->time_stamp;
3336         pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
3337
3338         for (e = 0; e < tail; e += rb_event_length(event)) {
3339
3340                 event = (struct ring_buffer_event *)(bpage->data + e);
3341
3342                 switch (event->type_len) {
3343
3344                 case RINGBUF_TYPE_TIME_EXTEND:
3345                         delta = rb_event_time_stamp(event);
3346                         ts += delta;
3347                         pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
3348                                 e, ts, delta);
3349                         break;
3350
3351                 case RINGBUF_TYPE_TIME_STAMP:
3352                         delta = rb_event_time_stamp(event);
3353                         ts = rb_fix_abs_ts(delta, ts);
3354                         pr_warn(" 0x%x:  [%lld] absolute:%lld TIME STAMP\n",
3355                                 e, ts, delta);
3356                         break;
3357
3358                 case RINGBUF_TYPE_PADDING:
3359                         ts += event->time_delta;
3360                         pr_warn(" 0x%x:  [%lld] delta:%d PADDING\n",
3361                                 e, ts, event->time_delta);
3362                         break;
3363
3364                 case RINGBUF_TYPE_DATA:
3365                         ts += event->time_delta;
3366                         pr_warn(" 0x%x:  [%lld] delta:%d %s%s\n",
3367                                 e, ts, event->time_delta,
3368                                 show_flags(event), show_irq(event));
3369                         break;
3370
3371                 default:
3372                         break;
3373                 }
3374         }
3375         pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
3376 }
3377
3378 static DEFINE_PER_CPU(atomic_t, checking);
3379 static atomic_t ts_dump;
3380
3381 #define buffer_warn_return(fmt, ...)                                    \
3382         do {                                                            \
3383                 /* If another report is happening, ignore this one */   \
3384                 if (atomic_inc_return(&ts_dump) != 1) {                 \
3385                         atomic_dec(&ts_dump);                           \
3386                         goto out;                                       \
3387                 }                                                       \
3388                 atomic_inc(&cpu_buffer->record_disabled);               \
3389                 pr_warn(fmt, ##__VA_ARGS__);                            \
3390                 dump_buffer_page(bpage, info, tail);                    \
3391                 atomic_dec(&ts_dump);                                   \
3392                 /* There are some cases in boot up where this can happen */ \
3393                 if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING))       \
3394                         /* Do not re-enable checking */                 \
3395                         return;                                         \
3396         } while (0)
3397
3398 /*
3399  * Check if the current event time stamp matches the deltas on
3400  * the buffer page.
3401  */
3402 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3403                          struct rb_event_info *info,
3404                          unsigned long tail)
3405 {
3406         struct ring_buffer_event *event;
3407         struct buffer_data_page *bpage;
3408         u64 ts, delta;
3409         bool full = false;
3410         int e;
3411
3412         bpage = info->tail_page->page;
3413
3414         if (tail == CHECK_FULL_PAGE) {
3415                 full = true;
3416                 tail = local_read(&bpage->commit);
3417         } else if (info->add_timestamp &
3418                    (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3419                 /* Ignore events with absolute time stamps */
3420                 return;
3421         }
3422
3423         /*
3424          * Do not check the first event (skip possible extends too).
3425          * Also do not check if previous events have not been committed.
3426          */
3427         if (tail <= 8 || tail > local_read(&bpage->commit))
3428                 return;
3429
3430         /*
3431          * If this interrupted another event's check, bail (not reentrant).
3432          */
3433         if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3434                 goto out;
3435
3436         ts = bpage->time_stamp;
3437
3438         for (e = 0; e < tail; e += rb_event_length(event)) {
3439
3440                 event = (struct ring_buffer_event *)(bpage->data + e);
3441
3442                 switch (event->type_len) {
3443
3444                 case RINGBUF_TYPE_TIME_EXTEND:
3445                         delta = rb_event_time_stamp(event);
3446                         ts += delta;
3447                         break;
3448
3449                 case RINGBUF_TYPE_TIME_STAMP:
3450                         delta = rb_event_time_stamp(event);
3451                         delta = rb_fix_abs_ts(delta, ts);
3452                         if (delta < ts) {
3453                                 buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
3454                                                    cpu_buffer->cpu, ts, delta);
3455                         }
3456                         ts = delta;
3457                         break;
3458
3459                 case RINGBUF_TYPE_PADDING:
3460                         if (event->time_delta == 1)
3461                                 break;
3462                         fallthrough;
3463                 case RINGBUF_TYPE_DATA:
3464                         ts += event->time_delta;
3465                         break;
3466
3467                 default:
3468                         RB_WARN_ON(cpu_buffer, 1);
3469                 }
3470         }
3471         if ((full && ts > info->ts) ||
3472             (!full && ts + info->delta != info->ts)) {
3473                 buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
3474                                    cpu_buffer->cpu,
3475                                    ts + info->delta, info->ts, info->delta,
3476                                    info->before, info->after,
3477                                    full ? " (full)" : "", show_interrupt_level());
3478         }
3479 out:
3480         atomic_dec(this_cpu_ptr(&checking));
3481 }
3482 #else
3483 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3484                          struct rb_event_info *info,
3485                          unsigned long tail)
3486 {
3487 }
3488 #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
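/*
 * The validation above is essentially a replay: start from the page
 * timestamp and accumulate each event's delta, then compare against the
 * writer's expectation. A minimal sketch of that replay, assuming only
 * plain data events (no extends or absolute stamps):
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdio.h>

int main(void)
{
	unsigned long long ts = 1000;			/* page time stamp */
	unsigned int deltas[] = { 5, 12, 3, 40 };	/* per-event time_delta */

	for (unsigned int i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		ts += deltas[i];
		printf("event %u: ts=%llu\n", i, ts);
	}
	/* The writer then checks ts + info->delta == info->ts */
	return 0;
}
#endif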
3489
3490 static struct ring_buffer_event *
3491 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3492                   struct rb_event_info *info)
3493 {
3494         struct ring_buffer_event *event;
3495         struct buffer_page *tail_page;
3496         unsigned long tail, write, w;
3497
3498         /* Don't let the compiler play games with cpu_buffer->tail_page */
3499         tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3500
3501  /*A*/  w = local_read(&tail_page->write) & RB_WRITE_MASK;
3502         barrier();
3503         rb_time_read(&cpu_buffer->before_stamp, &info->before);
3504         rb_time_read(&cpu_buffer->write_stamp, &info->after);
3505         barrier();
3506         info->ts = rb_time_stamp(cpu_buffer->buffer);
3507
3508         if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3509                 info->delta = info->ts;
3510         } else {
3511                 /*
3512                  * If interrupting an event time update, we may need an
3513                  * absolute timestamp.
3514                  * Don't bother if this is the start of a new page (w == 0).
3515                  */
3516                 if (!w) {
3517                         /* Use the sub-buffer timestamp */
3518                         info->delta = 0;
3519                 } else if (unlikely(info->before != info->after)) {
3520                         info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3521                         info->length += RB_LEN_TIME_EXTEND;
3522                 } else {
3523                         info->delta = info->ts - info->after;
3524                         if (unlikely(test_time_stamp(info->delta))) {
3525                                 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3526                                 info->length += RB_LEN_TIME_EXTEND;
3527                         }
3528                 }
3529         }
3530
3531  /*B*/  rb_time_set(&cpu_buffer->before_stamp, info->ts);
3532
3533  /*C*/  write = local_add_return(info->length, &tail_page->write);
3534
3535         /* set write to only the index of the write */
3536         write &= RB_WRITE_MASK;
3537
3538         tail = write - info->length;
3539
3540         /* See if we shot past the end of this buffer page */
3541         if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
3542                 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
3543                 return rb_move_tail(cpu_buffer, tail, info);
3544         }
3545
3546         if (likely(tail == w)) {
3547                 /* Nothing interrupted us between A and C */
3548  /*D*/          rb_time_set(&cpu_buffer->write_stamp, info->ts);
3549                 /*
3550                  * If something came in between C and D, the write stamp
3551                  * may now not be in sync. But that's fine as the before_stamp
3552                  * will be different and then next event will just be forced
3553                  * to use an absolute timestamp.
3554                  */
3555                 if (likely(!(info->add_timestamp &
3556                              (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3557                         /* This did not interrupt any time update */
3558                         info->delta = info->ts - info->after;
3559                 else
3560                         /* Just use full timestamp for interrupting event */
3561                         info->delta = info->ts;
3562                 check_buffer(cpu_buffer, info, tail);
3563         } else {
3564                 u64 ts;
3565                 /* SLOW PATH - Interrupted between A and C */
3566
3567                 /* Save the old before_stamp */
3568                 rb_time_read(&cpu_buffer->before_stamp, &info->before);
3569
3570                 /*
3571                  * Read a new timestamp and update the before_stamp to make
3572                  * the next event after this one force using an absolute
3573                  * timestamp. This is in case an interrupt were to come in
3574                  * between E and F.
3575                  */
3576                 ts = rb_time_stamp(cpu_buffer->buffer);
3577                 rb_time_set(&cpu_buffer->before_stamp, ts);
3578
3579                 barrier();
3580  /*E*/          rb_time_read(&cpu_buffer->write_stamp, &info->after);
3581                 barrier();
3582  /*F*/          if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3583                     info->after == info->before && info->after < ts) {
3584                         /*
3585                          * Nothing came after this event between C and F, it is
3586                          * safe to use info->after for the delta as it
3587                          * matched info->before and is still valid.
3588                          */
3589                         info->delta = ts - info->after;
3590                 } else {
3591                         /*
3592                          * Interrupted between C and F:
3593                          * Lost the previous event's time stamp. Just set the
3594                          * delta to zero, and this will be the same time as
3595                          * the event this event interrupted. And the events that
3596                          * came after this will still be correct (as they would
3597                          * have built their delta on the previous event).
3598                          */
3599                         info->delta = 0;
3600                 }
3601                 info->ts = ts;
3602                 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3603         }
3604
3605         /*
3606          * If this is the first commit on the page, then it has the same
3607          * timestamp as the page itself.
3608          */
3609         if (unlikely(!tail && !(info->add_timestamp &
3610                                 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3611                 info->delta = 0;
3612
3613         /* We reserved something on the buffer */
3614
3615         event = __rb_page_index(tail_page, tail);
3616         rb_update_event(cpu_buffer, event, info);
3617
3618         local_inc(&tail_page->entries);
3619
3620         /*
3621          * If this is the first commit on the page, then update
3622          * its timestamp.
3623          */
3624         if (unlikely(!tail))
3625                 tail_page->page->time_stamp = info->ts;
3626
3627         /* account for these added bytes */
3628         local_add(info->length, &cpu_buffer->entries_bytes);
3629
3630         return event;
3631 }
3632
3633 static __always_inline struct ring_buffer_event *
3634 rb_reserve_next_event(struct trace_buffer *buffer,
3635                       struct ring_buffer_per_cpu *cpu_buffer,
3636                       unsigned long length)
3637 {
3638         struct ring_buffer_event *event;
3639         struct rb_event_info info;
3640         int nr_loops = 0;
3641         int add_ts_default;
3642
3643         /* ring buffer does cmpxchg, make sure it is safe in NMI context */
3644         if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
3645             (unlikely(in_nmi()))) {
3646                 return NULL;
3647         }
3648
3649         rb_start_commit(cpu_buffer);
3650         /* The commit page can not change after this */
3651
3652 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3653         /*
3654          * Due to the ability to swap a cpu buffer from a buffer
3655          * it is possible it was swapped before we committed.
3656          * (committing stops a swap). We check for it here and
3657          * if it happened, we have to fail the write.
3658          */
3659         barrier();
3660         if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3661                 local_dec(&cpu_buffer->committing);
3662                 local_dec(&cpu_buffer->commits);
3663                 return NULL;
3664         }
3665 #endif
3666
3667         info.length = rb_calculate_event_length(length);
3668
3669         if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3670                 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3671                 info.length += RB_LEN_TIME_EXTEND;
3672                 if (info.length > cpu_buffer->buffer->max_data_size)
3673                         goto out_fail;
3674         } else {
3675                 add_ts_default = RB_ADD_STAMP_NONE;
3676         }
3677
3678  again:
3679         info.add_timestamp = add_ts_default;
3680         info.delta = 0;
3681
3682         /*
3683          * We allow for interrupts to reenter here and do a trace.
3684          * If one does, it will cause this original code to loop
3685          * back here. Even with heavy interrupts happening, this
3686          * should only happen a few times in a row. If this happens
3687          * 1000 times in a row, there must be either an interrupt
3688          * storm or we have something buggy.
3689          * Bail!
3690          */
3691         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3692                 goto out_fail;
3693
3694         event = __rb_reserve_next(cpu_buffer, &info);
3695
3696         if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3697                 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3698                         info.length -= RB_LEN_TIME_EXTEND;
3699                 goto again;
3700         }
3701
3702         if (likely(event))
3703                 return event;
3704  out_fail:
3705         rb_end_commit(cpu_buffer);
3706         return NULL;
3707 }
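/*
 * The retry shape of the reserve loop above, in miniature: retry on the
 * EAGAIN marker but cap the iterations so an interrupt storm cannot spin
 * forever. TOY_EAGAIN and try_once() are stand-ins for the kernel's
 * ERR_PTR(-EAGAIN) convention and __rb_reserve_next(), respectively.
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdio.h>

#define TOY_EAGAIN	((void *)-1)	/* stand-in retry marker */

static int attempts;

/* Pretend the first two attempts race with a nested writer */
static void *try_once(void)
{
	return ++attempts < 3 ? TOY_EAGAIN : (void *)&attempts;
}

static void *reserve_with_retry(void)
{
	int nr_loops = 0;
	void *ev;

	do {
		/* 1000 retries in a row means a storm or a bug: bail */
		if (++nr_loops > 1000)
			return NULL;
		ev = try_once();
	} while (ev == TOY_EAGAIN);

	return ev;
}

int main(void)
{
	printf("reserved=%p after %d attempts\n",
	       reserve_with_retry(), attempts);
	return 0;
}
#endif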
3708
3709 /**
3710  * ring_buffer_lock_reserve - reserve a part of the buffer
3711  * @buffer: the ring buffer to reserve from
3712  * @length: the length of the data to reserve (excluding event header)
3713  *
3714  * Returns a reserved event on the ring buffer to copy directly to.
3715  * The user of this interface will need to get the body to write into
3716  * and can use the ring_buffer_event_data() interface.
3717  *
3718  * The length is the length of the data needed, not the event length
3719  * which also includes the event header.
3720  *
3721  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3722  * If NULL is returned, then nothing has been allocated or locked.
3723  */
3724 struct ring_buffer_event *
3725 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3726 {
3727         struct ring_buffer_per_cpu *cpu_buffer;
3728         struct ring_buffer_event *event;
3729         int cpu;
3730
3731         /* If we are tracing schedule, we don't want to recurse */
3732         preempt_disable_notrace();
3733
3734         if (unlikely(atomic_read(&buffer->record_disabled)))
3735                 goto out;
3736
3737         cpu = raw_smp_processor_id();
3738
3739         if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3740                 goto out;
3741
3742         cpu_buffer = buffer->buffers[cpu];
3743
3744         if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3745                 goto out;
3746
3747         if (unlikely(length > buffer->max_data_size))
3748                 goto out;
3749
3750         if (unlikely(trace_recursive_lock(cpu_buffer)))
3751                 goto out;
3752
3753         event = rb_reserve_next_event(buffer, cpu_buffer, length);
3754         if (!event)
3755                 goto out_unlock;
3756
3757         return event;
3758
3759  out_unlock:
3760         trace_recursive_unlock(cpu_buffer);
3761  out:
3762         preempt_enable_notrace();
3763         return NULL;
3764 }
3765 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
3766
3767 /*
3768  * Decrement the entries to the page that an event is on.
3769  * The event does not even need to exist, only the pointer
3770  * to the page it is on. This may only be called before the commit
3771  * takes place.
3772  */
3773 static inline void
3774 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3775                    struct ring_buffer_event *event)
3776 {
3777         unsigned long addr = (unsigned long)event;
3778         struct buffer_page *bpage = cpu_buffer->commit_page;
3779         struct buffer_page *start;
3780
3781         addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
3782
3783         /* Do the likely case first */
3784         if (likely(bpage->page == (void *)addr)) {
3785                 local_dec(&bpage->entries);
3786                 return;
3787         }
3788
3789         /*
3790          * Because the commit page may be on the reader page we
3791          * start with the next page and check the end loop there.
3792          */
3793         rb_inc_page(&bpage);
3794         start = bpage;
3795         do {
3796                 if (bpage->page == (void *)addr) {
3797                         local_dec(&bpage->entries);
3798                         return;
3799                 }
3800                 rb_inc_page(&bpage);
3801         } while (bpage != start);
3802
3803         /* commit not part of this buffer?? */
3804         RB_WARN_ON(cpu_buffer, 1);
3805 }
3806
3807 /**
3808  * ring_buffer_discard_commit - discard an event that has not been committed
3809  * @buffer: the ring buffer
3810  * @event: non committed event to discard
3811  *
3812  * Sometimes an event that is in the ring buffer needs to be ignored.
3813  * This function lets the user discard an event in the ring buffer
3814  * and then that event will not be read later.
3815  *
3816  * This function only works if it is called before the item has been
3817  * committed. It will try to free the event from the ring buffer
3818  * if another event has not been added behind it.
3819  *
3820  * If another event has been added behind it, it will set the event
3821  * up as discarded, and perform the commit.
3822  *
3823  * If this function is called, do not call ring_buffer_unlock_commit on
3824  * the event.
3825  */
3826 void ring_buffer_discard_commit(struct trace_buffer *buffer,
3827                                 struct ring_buffer_event *event)
3828 {
3829         struct ring_buffer_per_cpu *cpu_buffer;
3830         int cpu;
3831
3832         /* The event is discarded regardless */
3833         rb_event_discard(event);
3834
3835         cpu = smp_processor_id();
3836         cpu_buffer = buffer->buffers[cpu];
3837
3838         /*
3839          * This must only be called if the event has not been
3840          * committed yet. Thus we can assume that preemption
3841          * is still disabled.
3842          */
3843         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3844
3845         rb_decrement_entry(cpu_buffer, event);
3846         /* The commit must end whether or not the discard succeeded */
3847         rb_try_to_discard(cpu_buffer, event);
3848
3849         rb_end_commit(cpu_buffer);
3851
3852         trace_recursive_unlock(cpu_buffer);
3853
3854         preempt_enable_notrace();
3855 }
3857 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
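/*
 * A hedged usage sketch for the function above: reserve an event, decide
 * it is unwanted, and discard it instead of committing. should_drop() is
 * a hypothetical predicate; note the event is not also committed after a
 * discard, per the rules in the kernel-doc above.
 */
#if 0	/* illustrative sketch, not part of the build */
static void write_or_drop(struct trace_buffer *buffer,
			  void *data, unsigned long len)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return;

	memcpy(ring_buffer_event_data(event), data, len);
	if (should_drop(data))			/* hypothetical predicate */
		ring_buffer_discard_commit(buffer, event);
	else
		ring_buffer_unlock_commit(buffer);
}
#endif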
3858
3859 /**
3860  * ring_buffer_write - write data to the buffer without reserving
3861  * @buffer: The ring buffer to write to.
3862  * @length: The length of the data being written (excluding the event header)
3863  * @data: The data to write to the buffer.
3864  *
3865  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3866  * one function. If you already have the data to write to the buffer, it
3867  * may be easier to simply call this function.
3868  *
3869  * Note, like ring_buffer_lock_reserve, the length is the length of the data
3870  * and not the length of the event which would hold the header.
3871  */
3872 int ring_buffer_write(struct trace_buffer *buffer,
3873                       unsigned long length,
3874                       void *data)
3875 {
3876         struct ring_buffer_per_cpu *cpu_buffer;
3877         struct ring_buffer_event *event;
3878         void *body;
3879         int ret = -EBUSY;
3880         int cpu;
3881
3882         preempt_disable_notrace();
3883
3884         if (atomic_read(&buffer->record_disabled))
3885                 goto out;
3886
3887         cpu = raw_smp_processor_id();
3888
3889         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3890                 goto out;
3891
3892         cpu_buffer = buffer->buffers[cpu];
3893
3894         if (atomic_read(&cpu_buffer->record_disabled))
3895                 goto out;
3896
3897         if (length > buffer->max_data_size)
3898                 goto out;
3899
3900         if (unlikely(trace_recursive_lock(cpu_buffer)))
3901                 goto out;
3902
3903         event = rb_reserve_next_event(buffer, cpu_buffer, length);
3904         if (!event)
3905                 goto out_unlock;
3906
3907         body = rb_event_data(event);
3908
3909         memcpy(body, data, length);
3910
3911         rb_commit(cpu_buffer);
3912
3913         rb_wakeups(buffer, cpu_buffer);
3914
3915         ret = 0;
3916
3917  out_unlock:
3918         trace_recursive_unlock(cpu_buffer);
3919
3920  out:
3921         preempt_enable_notrace();
3922
3923         return ret;
3924 }
3925 EXPORT_SYMBOL_GPL(ring_buffer_write);
3926
3927 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3928 {
3929         struct buffer_page *reader = cpu_buffer->reader_page;
3930         struct buffer_page *head = rb_set_head_page(cpu_buffer);
3931         struct buffer_page *commit = cpu_buffer->commit_page;
3932
3933         /* In case of error, head will be NULL */
3934         if (unlikely(!head))
3935                 return true;
3936
3937         /* Reader should exhaust content in reader page */
3938         if (reader->read != rb_page_commit(reader))
3939                 return false;
3940
3941         /*
3942          * If writers are committing on the reader page, knowing all
3943          * committed content has been read, the ring buffer is empty.
3944          */
3945         if (commit == reader)
3946                 return true;
3947
3948         /*
3949          * If writers are committing on a page other than reader page
3950          * and head page, there should always be content to read.
3951          */
3952         if (commit != head)
3953                 return false;
3954
3955         /*
3956          * Writers are committing on the head page; we only need to
3957          * care about whether committed data exists, and the reader will
3958          * swap the reader page with the head page when it needs to read.
3959          */
3960         return rb_page_commit(commit) == 0;
3961 }
3962
3963 /**
3964  * ring_buffer_record_disable - stop all writes into the buffer
3965  * @buffer: The ring buffer to stop writes to.
3966  *
3967  * This prevents all writes to the buffer. Any attempt to write
3968  * to the buffer after this will fail and return NULL.
3969  *
3970  * The caller should call synchronize_rcu() after this.
3971  */
3972 void ring_buffer_record_disable(struct trace_buffer *buffer)
3973 {
3974         atomic_inc(&buffer->record_disabled);
3975 }
3976 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3977
3978 /**
3979  * ring_buffer_record_enable - enable writes to the buffer
3980  * @buffer: The ring buffer to enable writes
3981  *
3982  * Note, multiple disables will need the same number of enables
3983  * to truly enable the writing (much like preempt_disable).
3984  */
3985 void ring_buffer_record_enable(struct trace_buffer *buffer)
3986 {
3987         atomic_dec(&buffer->record_disabled);
3988 }
3989 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3990
3991 /**
3992  * ring_buffer_record_off - stop all writes into the buffer
3993  * @buffer: The ring buffer to stop writes to.
3994  *
3995  * This prevents all writes to the buffer. Any attempt to write
3996  * to the buffer after this will fail and return NULL.
3997  *
3998  * This is different than ring_buffer_record_disable() as
3999  * it works like an on/off switch, whereas the disable() version
4000  * must be paired with an enable().
4001  */
4002 void ring_buffer_record_off(struct trace_buffer *buffer)
4003 {
4004         unsigned int rd;
4005         unsigned int new_rd;
4006
4007         rd = atomic_read(&buffer->record_disabled);
4008         do {
4009                 new_rd = rd | RB_BUFFER_OFF;
4010         } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4011 }
4012 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
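/*
 * The set-a-flag loop above, re-expressed with C11 atomics under a toy
 * flag bit. Like atomic_try_cmpxchg(), compare_exchange updates the
 * expected value on failure, so each retry recomputes rd | flag against
 * the freshly observed counter.
 */
#if 0	/* illustrative sketch, not part of the build */
#include <stdatomic.h>

#define TOY_BUFFER_OFF	(1U << 20)	/* stand-in for RB_BUFFER_OFF */

static void toy_record_off(atomic_uint *record_disabled)
{
	unsigned int rd = atomic_load(record_disabled);

	while (!atomic_compare_exchange_weak(record_disabled, &rd,
					     rd | TOY_BUFFER_OFF))
		;	/* rd now holds the current value; retry */
}
#endif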
4013
4014 /**
4015  * ring_buffer_record_on - restart writes into the buffer
4016  * @buffer: The ring buffer to start writes to.
4017  *
4018  * This enables all writes to the buffer that was disabled by
4019  * ring_buffer_record_off().
4020  *
4021  * This is different than ring_buffer_record_enable() as
4022  * it works like an on/off switch, whereas the enable() version
4023  * must be paired with a disable().
4024  */
4025 void ring_buffer_record_on(struct trace_buffer *buffer)
4026 {
4027         unsigned int rd;
4028         unsigned int new_rd;
4029
4030         rd = atomic_read(&buffer->record_disabled);
4031         do {
4032                 new_rd = rd & ~RB_BUFFER_OFF;
4033         } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4034 }
4035 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
4036
4037 /**
4038  * ring_buffer_record_is_on - return true if the ring buffer can write
4039  * @buffer: The ring buffer to see if write is enabled
4040  *
4041  * Returns true if the ring buffer is in a state that it accepts writes.
4042  */
4043 bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4044 {
4045         return !atomic_read(&buffer->record_disabled);
4046 }
4047
4048 /**
4049  * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4050  * @buffer: The ring buffer to see if write is set enabled
4051  *
4052  * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4053  * Note that this does NOT mean it is in a writable state.
4054  *
4055  * It may return true when the ring buffer has been disabled by
4056  * ring_buffer_record_disable(), as that is a temporary disabling of
4057  * the ring buffer.
4058  */
4059 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4060 {
4061         return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4062 }
4063
4064 /**
4065  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4066  * @buffer: The ring buffer to stop writes to.
4067  * @cpu: The CPU buffer to stop
4068  *
4069  * This prevents all writes to the buffer. Any attempt to write
4070  * to the buffer after this will fail and return NULL.
4071  *
4072  * The caller should call synchronize_rcu() after this.
4073  */
4074 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4075 {
4076         struct ring_buffer_per_cpu *cpu_buffer;
4077
4078         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4079                 return;
4080
4081         cpu_buffer = buffer->buffers[cpu];
4082         atomic_inc(&cpu_buffer->record_disabled);
4083 }
4084 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4085
4086 /**
4087  * ring_buffer_record_enable_cpu - enable writes to the buffer
4088  * @buffer: The ring buffer to enable writes
4089  * @cpu: The CPU to enable.
4090  *
4091  * Note, multiple disables will need the same number of enables
4092  * to truly enable the writing (much like preempt_disable).
4093  */
4094 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4095 {
4096         struct ring_buffer_per_cpu *cpu_buffer;
4097
4098         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4099                 return;
4100
4101         cpu_buffer = buffer->buffers[cpu];
4102         atomic_dec(&cpu_buffer->record_disabled);
4103 }
4104 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4105
4106 /*
4107  * The total entries in the ring buffer is the running counter
4108  * of entries entered into the ring buffer, minus the sum of
4109  * the entries read from the ring buffer and the number of
4110  * entries that were overwritten.
4111  */
4112 static inline unsigned long
4113 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4114 {
4115         return local_read(&cpu_buffer->entries) -
4116                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4117 }
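/*
 * A worked instance of the accounting above: if 100 events were ever
 * written, 30 were lost to overwrite when the buffer wrapped, and 50
 * were consumed by readers, then 100 - (30 + 50) = 20 entries remain.
 */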
4118
4119 /**
4120  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4121  * @buffer: The ring buffer
4122  * @cpu: The per CPU buffer to read from.
4123  */
4124 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4125 {
4126         unsigned long flags;
4127         struct ring_buffer_per_cpu *cpu_buffer;
4128         struct buffer_page *bpage;
4129         u64 ret = 0;
4130
4131         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4132                 return 0;
4133
4134         cpu_buffer = buffer->buffers[cpu];
4135         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4136         /*
4137          * If the tail is on the reader_page, the oldest time stamp is
4138          * on the reader page.
4139          */
4140         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4141                 bpage = cpu_buffer->reader_page;
4142         else
4143                 bpage = rb_set_head_page(cpu_buffer);
4144         if (bpage)
4145                 ret = bpage->page->time_stamp;
4146         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4147
4148         return ret;
4149 }
4150 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4151
4152 /**
4153  * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4154  * @buffer: The ring buffer
4155  * @cpu: The per CPU buffer to read from.
4156  */
4157 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4158 {
4159         struct ring_buffer_per_cpu *cpu_buffer;
4160         unsigned long ret;
4161
4162         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4163                 return 0;
4164
4165         cpu_buffer = buffer->buffers[cpu];
4166         ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4167
4168         return ret;
4169 }
4170 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4171
4172 /**
4173  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4174  * @buffer: The ring buffer
4175  * @cpu: The per CPU buffer to get the entries from.
4176  */
4177 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4178 {
4179         struct ring_buffer_per_cpu *cpu_buffer;
4180
4181         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4182                 return 0;
4183
4184         cpu_buffer = buffer->buffers[cpu];
4185
4186         return rb_num_of_entries(cpu_buffer);
4187 }
4188 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4189
4190 /**
4191  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4192  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4193  * @buffer: The ring buffer
4194  * @cpu: The per CPU buffer to get the number of overruns from
4195  */
4196 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
4197 {
4198         struct ring_buffer_per_cpu *cpu_buffer;
4199         unsigned long ret;
4200
4201         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4202                 return 0;
4203
4204         cpu_buffer = buffer->buffers[cpu];
4205         ret = local_read(&cpu_buffer->overrun);
4206
4207         return ret;
4208 }
4209 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
4210
4211 /**
4212  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4213  * commits failing due to the buffer wrapping around while there are uncommitted
4214  * events, such as during an interrupt storm.
4215  * @buffer: The ring buffer
4216  * @cpu: The per CPU buffer to get the number of overruns from
4217  */
4218 unsigned long
4219 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
4220 {
4221         struct ring_buffer_per_cpu *cpu_buffer;
4222         unsigned long ret;
4223
4224         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4225                 return 0;
4226
4227         cpu_buffer = buffer->buffers[cpu];
4228         ret = local_read(&cpu_buffer->commit_overrun);
4229
4230         return ret;
4231 }
4232 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4233
4234 /**
4235  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4236  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4237  * @buffer: The ring buffer
4238  * @cpu: The per CPU buffer to get the number of overruns from
4239  */
4240 unsigned long
4241 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
4242 {
4243         struct ring_buffer_per_cpu *cpu_buffer;
4244         unsigned long ret;
4245
4246         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4247                 return 0;
4248
4249         cpu_buffer = buffer->buffers[cpu];
4250         ret = local_read(&cpu_buffer->dropped_events);
4251
4252         return ret;
4253 }
4254 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4255
4256 /**
4257  * ring_buffer_read_events_cpu - get the number of events successfully read
4258  * @buffer: The ring buffer
4259  * @cpu: The per CPU buffer to get the number of events read
4260  */
4261 unsigned long
4262 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
4263 {
4264         struct ring_buffer_per_cpu *cpu_buffer;
4265
4266         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4267                 return 0;
4268
4269         cpu_buffer = buffer->buffers[cpu];
4270         return cpu_buffer->read;
4271 }
4272 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4273
4274 /**
4275  * ring_buffer_entries - get the number of entries in a buffer
4276  * @buffer: The ring buffer
4277  *
4278  * Returns the total number of entries in the ring buffer
4279  * (all CPU entries)
4280  */
4281 unsigned long ring_buffer_entries(struct trace_buffer *buffer)
4282 {
4283         struct ring_buffer_per_cpu *cpu_buffer;
4284         unsigned long entries = 0;
4285         int cpu;
4286
4287         /* if you care about this being correct, lock the buffer */
4288         for_each_buffer_cpu(buffer, cpu) {
4289                 cpu_buffer = buffer->buffers[cpu];
4290                 entries += rb_num_of_entries(cpu_buffer);
4291         }
4292
4293         return entries;
4294 }
4295 EXPORT_SYMBOL_GPL(ring_buffer_entries);
4296
4297 /**
4298  * ring_buffer_overruns - get the number of overruns in buffer
4299  * @buffer: The ring buffer
4300  *
4301  * Returns the total number of overruns in the ring buffer
4302  * (all CPU entries)
4303  */
4304 unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
4305 {
4306         struct ring_buffer_per_cpu *cpu_buffer;
4307         unsigned long overruns = 0;
4308         int cpu;
4309
4310         /* if you care about this being correct, lock the buffer */
4311         for_each_buffer_cpu(buffer, cpu) {
4312                 cpu_buffer = buffer->buffers[cpu];
4313                 overruns += local_read(&cpu_buffer->overrun);
4314         }
4315
4316         return overruns;
4317 }
4318 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
4319
4320 static void rb_iter_reset(struct ring_buffer_iter *iter)
4321 {
4322         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4323
4324         /* Iterator usage is expected to have record disabled */
4325         iter->head_page = cpu_buffer->reader_page;
4326         iter->head = cpu_buffer->reader_page->read;
4327         iter->next_event = iter->head;
4328
4329         iter->cache_reader_page = iter->head_page;
4330         iter->cache_read = cpu_buffer->read;
4331         iter->cache_pages_removed = cpu_buffer->pages_removed;
4332
4333         if (iter->head) {
4334                 iter->read_stamp = cpu_buffer->read_stamp;
4335                 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4336         } else {
4337                 iter->read_stamp = iter->head_page->page->time_stamp;
4338                 iter->page_stamp = iter->read_stamp;
4339         }
4340 }
4341
4342 /**
4343  * ring_buffer_iter_reset - reset an iterator
4344  * @iter: The iterator to reset
4345  *
4346  * Resets the iterator, so that it will start from the beginning
4347  * again.
4348  */
4349 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4350 {
4351         struct ring_buffer_per_cpu *cpu_buffer;
4352         unsigned long flags;
4353
4354         if (!iter)
4355                 return;
4356
4357         cpu_buffer = iter->cpu_buffer;
4358
4359         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4360         rb_iter_reset(iter);
4361         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4362 }
4363 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4364
4365 /**
4366  * ring_buffer_iter_empty - check if an iterator has no more to read
4367  * @iter: The iterator to check
4368  */
4369 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4370 {
4371         struct ring_buffer_per_cpu *cpu_buffer;
4372         struct buffer_page *reader;
4373         struct buffer_page *head_page;
4374         struct buffer_page *commit_page;
4375         struct buffer_page *curr_commit_page;
4376         unsigned commit;
4377         u64 curr_commit_ts;
4378         u64 commit_ts;
4379
4380         cpu_buffer = iter->cpu_buffer;
4381         reader = cpu_buffer->reader_page;
4382         head_page = cpu_buffer->head_page;
4383         commit_page = cpu_buffer->commit_page;
4384         commit_ts = commit_page->page->time_stamp;
4385
4386         /*
4387          * When the writer goes across pages, it issues a cmpxchg which
4388          * is a mb(), which will synchronize with the rmb here.
4389          * (see rb_tail_page_update())
4390          */
4391         smp_rmb();
4392         commit = rb_page_commit(commit_page);
4393         /* We want to make sure that the commit page doesn't change */
4394         smp_rmb();
4395
4396         /* Make sure commit page didn't change */
4397         curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4398         curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4399
4400         /* If the commit page changed, then there's more data */
4401         if (curr_commit_page != commit_page ||
4402             curr_commit_ts != commit_ts)
4403                 return 0;
4404
4405         /* Still racy, as it may return a false positive, but that's OK */
4406         return ((iter->head_page == commit_page && iter->head >= commit) ||
4407                 (iter->head_page == reader && commit_page == head_page &&
4408                  head_page->read == commit &&
4409                  iter->head == rb_page_commit(cpu_buffer->reader_page)));
4410 }
4411 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4412
4413 static void
4414 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4415                      struct ring_buffer_event *event)
4416 {
4417         u64 delta;
4418
4419         switch (event->type_len) {
4420         case RINGBUF_TYPE_PADDING:
4421                 return;
4422
4423         case RINGBUF_TYPE_TIME_EXTEND:
4424                 delta = rb_event_time_stamp(event);
4425                 cpu_buffer->read_stamp += delta;
4426                 return;
4427
4428         case RINGBUF_TYPE_TIME_STAMP:
4429                 delta = rb_event_time_stamp(event);
4430                 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
4431                 cpu_buffer->read_stamp = delta;
4432                 return;
4433
4434         case RINGBUF_TYPE_DATA:
4435                 cpu_buffer->read_stamp += event->time_delta;
4436                 return;
4437
4438         default:
4439                 RB_WARN_ON(cpu_buffer, 1);
4440         }
4441 }
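
/*
 * Worked example (illustrative) of the cases above, starting from
 * read_stamp = 1000:
 *   DATA, time_delta = 27        -> read_stamp = 1027
 *   TIME_EXTEND, delta = 1 << 30 -> read_stamp = 1027 + (1 << 30)
 *   TIME_STAMP, absolute ts      -> read_stamp = ts (after
 *       rb_fix_abs_ts() re-inserts any saved TS_MSB bits)
 *   PADDING                      -> read_stamp unchanged
 */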
4442
4443 static void
4444 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4445                           struct ring_buffer_event *event)
4446 {
4447         u64 delta;
4448
4449         switch (event->type_len) {
4450         case RINGBUF_TYPE_PADDING:
4451                 return;
4452
4453         case RINGBUF_TYPE_TIME_EXTEND:
4454                 delta = rb_event_time_stamp(event);
4455                 iter->read_stamp += delta;
4456                 return;
4457
4458         case RINGBUF_TYPE_TIME_STAMP:
4459                 delta = rb_event_time_stamp(event);
4460                 delta = rb_fix_abs_ts(delta, iter->read_stamp);
4461                 iter->read_stamp = delta;
4462                 return;
4463
4464         case RINGBUF_TYPE_DATA:
4465                 iter->read_stamp += event->time_delta;
4466                 return;
4467
4468         default:
4469                 RB_WARN_ON(iter->cpu_buffer, 1);
4470         }
4471 }
4472
4473 static struct buffer_page *
4474 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4475 {
4476         struct buffer_page *reader = NULL;
4477         unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
4478         unsigned long overwrite;
4479         unsigned long flags;
4480         int nr_loops = 0;
4481         bool ret;
4482
4483         local_irq_save(flags);
4484         arch_spin_lock(&cpu_buffer->lock);
4485
4486  again:
4487         /*
4488          * This should normally only loop twice. But because the
4489          * start of the reader inserts an empty page, it causes
4490          * a case where we will loop three times. There should be no
4491          * reason to loop four times (that I know of).
4492          */
4493         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4494                 reader = NULL;
4495                 goto out;
4496         }
4497
4498         reader = cpu_buffer->reader_page;
4499
4500         /* If there's more to read, return this page */
4501         if (cpu_buffer->reader_page->read < rb_page_size(reader))
4502                 goto out;
4503
4504         /* Never should we have an index greater than the size */
4505         if (RB_WARN_ON(cpu_buffer,
4506                        cpu_buffer->reader_page->read > rb_page_size(reader)))
4507                 goto out;
4508
4509         /* check if we caught up to the tail */
4510         reader = NULL;
4511         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4512                 goto out;
4513
4514         /* Don't bother swapping if the ring buffer is empty */
4515         if (rb_num_of_entries(cpu_buffer) == 0)
4516                 goto out;
4517
4518         /*
4519          * Reset the reader page to size zero.
4520          */
4521         local_set(&cpu_buffer->reader_page->write, 0);
4522         local_set(&cpu_buffer->reader_page->entries, 0);
4523         local_set(&cpu_buffer->reader_page->page->commit, 0);
4524         cpu_buffer->reader_page->real_end = 0;
4525
4526  spin:
4527         /*
4528          * Splice the empty reader page into the list around the head.
4529          */
4530         reader = rb_set_head_page(cpu_buffer);
4531         if (!reader)
4532                 goto out;
4533         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4534         cpu_buffer->reader_page->list.prev = reader->list.prev;
4535
4536         /*
4537          * cpu_buffer->pages just needs to point to the buffer; it
4538          *  has no specific buffer page to point to. Let's move it out
4539          *  of our way so we don't accidentally swap it.
4540          */
4541         cpu_buffer->pages = reader->list.prev;
4542
4543         /* The reader page will be pointing to the new head */
4544         rb_set_list_to_head(&cpu_buffer->reader_page->list);
4545
4546         /*
4547          * We want to make sure we read the overruns after we set up our
4548          * pointers to the next object. The writer side does a
4549          * cmpxchg to cross pages which acts as the mb on the writer
4550          * side. Note, the reader will constantly fail the swap
4551          * while the writer is updating the pointers, so this
4552          * guarantees that the overwrite recorded here is the one we
4553          * want to compare with the last_overrun.
4554          */
4555         smp_mb();
4556         overwrite = local_read(&(cpu_buffer->overrun));
4557
4558         /*
4559          * Here's the tricky part.
4560          *
4561          * We need to move the pointer past the header page.
4562          * But we can only do that if a writer is not currently
4563          * moving it. The page before the header page has the
4564          * flag bit '1' set if it is pointing to the page we want,
4565          * but if the writer is in the process of moving it
4566          * then it will be '2', or '0' if it has already been moved.
4567          */
4568
4569         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4570
4571         /*
4572          * If we did not convert it, then we must try again.
4573          */
4574         if (!ret)
4575                 goto spin;
4576
4577         /*
4578          * Yay! We succeeded in replacing the page.
4579          *
4580          * Now make the new head point back to the reader page.
4581          */
4582         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4583         rb_inc_page(&cpu_buffer->head_page);
4584
4585         local_inc(&cpu_buffer->pages_read);
4586
4587         /* Finally update the reader page to the new head */
4588         cpu_buffer->reader_page = reader;
4589         cpu_buffer->reader_page->read = 0;
4590
4591         if (overwrite != cpu_buffer->last_overrun) {
4592                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4593                 cpu_buffer->last_overrun = overwrite;
4594         }
4595
4596         goto again;
4597
4598  out:
4599         /* Update the read_stamp on the first event */
4600         if (reader && reader->read == 0)
4601                 cpu_buffer->read_stamp = reader->page->time_stamp;
4602
4603         arch_spin_unlock(&cpu_buffer->lock);
4604         local_irq_restore(flags);
4605
4606         /*
4607          * The writer has preemption disabled, so wait for it to finish.
4608          * But not forever: 1 second is pretty much "forever" here.
4609          */
4610 #define USECS_WAIT      1000000
4611         for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
4612                 /* If the write is past the end of page, a writer is still updating it */
4613                 if (likely(!reader || rb_page_write(reader) <= bsize))
4614                         break;
4615
4616                 udelay(1);
4617
4618                 /* Get the latest version of the reader write value */
4619                 smp_rmb();
4620         }
4621
4622         /* The writer is not moving forward? Something is wrong */
4623         if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
4624                 reader = NULL;
4625
4626         /*
4627          * Make sure we see any padding after the write update
4628          * (see rb_reset_tail()).
4629          *
4630          * In addition, a writer may be writing on the reader page
4631          * if the page has not been fully filled, so the read barrier
4632          * is also needed to make sure we see the content of what is
4633          * committed by the writer (see rb_set_commit_to_write()).
4634          */
4635         smp_rmb();
4636
4638         return reader;
4639 }
4640
4641 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4642 {
4643         struct ring_buffer_event *event;
4644         struct buffer_page *reader;
4645         unsigned length;
4646
4647         reader = rb_get_reader_page(cpu_buffer);
4648
4649         /* This function should not be called when the buffer is empty */
4650         if (RB_WARN_ON(cpu_buffer, !reader))
4651                 return;
4652
4653         event = rb_reader_event(cpu_buffer);
4654
4655         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4656                 cpu_buffer->read++;
4657
4658         rb_update_read_stamp(cpu_buffer, event);
4659
4660         length = rb_event_length(event);
4661         cpu_buffer->reader_page->read += length;
4662         cpu_buffer->read_bytes += length;
4663 }
4664
4665 static void rb_advance_iter(struct ring_buffer_iter *iter)
4666 {
4667         struct ring_buffer_per_cpu *cpu_buffer;
4668
4669         cpu_buffer = iter->cpu_buffer;
4670
4671         /* If head == next_event then we need to jump to the next event */
4672         if (iter->head == iter->next_event) {
4673                 /* If the event gets overwritten again, there's nothing to do */
4674                 if (rb_iter_head_event(iter) == NULL)
4675                         return;
4676         }
4677
4678         iter->head = iter->next_event;
4679
4680         /*
4681          * Check if we are at the end of the buffer.
4682          */
4683         if (iter->next_event >= rb_page_size(iter->head_page)) {
4684                 /* discarded commits can make the page empty */
4685                 if (iter->head_page == cpu_buffer->commit_page)
4686                         return;
4687                 rb_inc_iter(iter);
4688                 return;
4689         }
4690
4691         rb_update_iter_read_stamp(iter, iter->event);
4692 }
4693
4694 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4695 {
4696         return cpu_buffer->lost_events;
4697 }
4698
4699 static struct ring_buffer_event *
4700 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4701                unsigned long *lost_events)
4702 {
4703         struct ring_buffer_event *event;
4704         struct buffer_page *reader;
4705         int nr_loops = 0;
4706
4707         if (ts)
4708                 *ts = 0;
4709  again:
4710         /*
4711          * We repeat when a time extend is encountered.
4712          * Since the time extend is always attached to a data event,
4713          * we should never loop more than once.
4714          * (We never hit the following condition more than twice).
4715          */
4716         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4717                 return NULL;
4718
4719         reader = rb_get_reader_page(cpu_buffer);
4720         if (!reader)
4721                 return NULL;
4722
4723         event = rb_reader_event(cpu_buffer);
4724
4725         switch (event->type_len) {
4726         case RINGBUF_TYPE_PADDING:
4727                 if (rb_null_event(event))
4728                         RB_WARN_ON(cpu_buffer, 1);
4729                 /*
4730                  * Because the writer could be discarding every
4731                  * event it creates (which would probably be bad)
4732                  * if we were to go back to "again" then we may never
4733                  * catch up, and will trigger the warn on, or lock
4734                  * the box. Return the padding, and we will release
4735                  * the current locks, and try again.
4736                  */
4737                 return event;
4738
4739         case RINGBUF_TYPE_TIME_EXTEND:
4740                 /* Internal data, OK to advance */
4741                 rb_advance_reader(cpu_buffer);
4742                 goto again;
4743
4744         case RINGBUF_TYPE_TIME_STAMP:
4745                 if (ts) {
4746                         *ts = rb_event_time_stamp(event);
4747                         *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
4748                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4749                                                          cpu_buffer->cpu, ts);
4750                 }
4751                 /* Internal data, OK to advance */
4752                 rb_advance_reader(cpu_buffer);
4753                 goto again;
4754
4755         case RINGBUF_TYPE_DATA:
4756                 if (ts && !(*ts)) {
4757                         *ts = cpu_buffer->read_stamp + event->time_delta;
4758                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4759                                                          cpu_buffer->cpu, ts);
4760                 }
4761                 if (lost_events)
4762                         *lost_events = rb_lost_events(cpu_buffer);
4763                 return event;
4764
4765         default:
4766                 RB_WARN_ON(cpu_buffer, 1);
4767         }
4768
4769         return NULL;
4770 }
4771 EXPORT_SYMBOL_GPL(ring_buffer_peek);
4772
4773 static struct ring_buffer_event *
4774 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4775 {
4776         struct trace_buffer *buffer;
4777         struct ring_buffer_per_cpu *cpu_buffer;
4778         struct ring_buffer_event *event;
4779         int nr_loops = 0;
4780
4781         if (ts)
4782                 *ts = 0;
4783
4784         cpu_buffer = iter->cpu_buffer;
4785         buffer = cpu_buffer->buffer;
4786
4787         /*
4788          * Check if someone performed a consuming read to the buffer
4789          * or removed some pages from the buffer. In these cases,
4790          * iterator was invalidated and we need to reset it.
4791          */
4792         if (unlikely(iter->cache_read != cpu_buffer->read ||
4793                      iter->cache_reader_page != cpu_buffer->reader_page ||
4794                      iter->cache_pages_removed != cpu_buffer->pages_removed))
4795                 rb_iter_reset(iter);
4796
4797  again:
4798         if (ring_buffer_iter_empty(iter))
4799                 return NULL;
4800
4801         /*
4802          * As the writer can mess with what the iterator is trying
4803          * to read, just give up if we fail to get an event after
4804          * three tries. The iterator is not as reliable as the consumer
4805          * when reading the ring buffer with an active writer.
4806          * Do not warn when three failures are reached.
4807          */
4808         if (++nr_loops > 3)
4809                 return NULL;
4810
4811         if (rb_per_cpu_empty(cpu_buffer))
4812                 return NULL;
4813
4814         if (iter->head >= rb_page_size(iter->head_page)) {
4815                 rb_inc_iter(iter);
4816                 goto again;
4817         }
4818
4819         event = rb_iter_head_event(iter);
4820         if (!event)
4821                 goto again;
4822
4823         switch (event->type_len) {
4824         case RINGBUF_TYPE_PADDING:
4825                 if (rb_null_event(event)) {
4826                         rb_inc_iter(iter);
4827                         goto again;
4828                 }
4829                 rb_advance_iter(iter);
4830                 return event;
4831
4832         case RINGBUF_TYPE_TIME_EXTEND:
4833                 /* Internal data, OK to advance */
4834                 rb_advance_iter(iter);
4835                 goto again;
4836
4837         case RINGBUF_TYPE_TIME_STAMP:
4838                 if (ts) {
4839                         *ts = rb_event_time_stamp(event);
4840                         *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4841                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4842                                                          cpu_buffer->cpu, ts);
4843                 }
4844                 /* Internal data, OK to advance */
4845                 rb_advance_iter(iter);
4846                 goto again;
4847
4848         case RINGBUF_TYPE_DATA:
4849                 if (ts && !(*ts)) {
4850                         *ts = iter->read_stamp + event->time_delta;
4851                         ring_buffer_normalize_time_stamp(buffer,
4852                                                          cpu_buffer->cpu, ts);
4853                 }
4854                 return event;
4855
4856         default:
4857                 RB_WARN_ON(cpu_buffer, 1);
4858         }
4859
4860         return NULL;
4861 }
4862 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4863
4864 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4865 {
4866         if (likely(!in_nmi())) {
4867                 raw_spin_lock(&cpu_buffer->reader_lock);
4868                 return true;
4869         }
4870
4871         /*
4872          * If an NMI die dumps out the content of the ring buffer,
4873          * trylock must be used to prevent a deadlock if the NMI
4874          * preempted a task that holds the ring buffer locks. If
4875          * we get the lock then all is fine; if not, then continue
4876          * with the read anyway, but this can corrupt the ring buffer,
4877          * so it must be permanently disabled from future writes.
4878          * Reading from NMI is a one-shot deal.
4879          */
4880         if (raw_spin_trylock(&cpu_buffer->reader_lock))
4881                 return true;
4882
4883         /* Continue without locking, but disable the ring buffer */
4884         atomic_inc(&cpu_buffer->record_disabled);
4885         return false;
4886 }
4887
4888 static inline void
4889 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4890 {
4891         if (likely(locked))
4892                 raw_spin_unlock(&cpu_buffer->reader_lock);
4893 }
4894
4895 /**
4896  * ring_buffer_peek - peek at the next event to be read
4897  * @buffer: The ring buffer to read
4898  * @cpu: The cpu to peek at
4899  * @ts: The timestamp counter of this event.
4900  * @lost_events: a variable to store if events were lost (may be NULL)
4901  *
4902  * This will return the event that will be read next, but does
4903  * not consume the data.
4904  */
4905 struct ring_buffer_event *
4906 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4907                  unsigned long *lost_events)
4908 {
4909         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4910         struct ring_buffer_event *event;
4911         unsigned long flags;
4912         bool dolock;
4913
4914         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4915                 return NULL;
4916
4917  again:
4918         local_irq_save(flags);
4919         dolock = rb_reader_lock(cpu_buffer);
4920         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4921         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4922                 rb_advance_reader(cpu_buffer);
4923         rb_reader_unlock(cpu_buffer, dolock);
4924         local_irq_restore(flags);
4925
4926         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4927                 goto again;
4928
4929         return event;
4930 }
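
/*
 * Example (illustrative sketch): peek at the next event without
 * consuming it, and only consume it when it passes a filter. The
 * "wanted" predicate is hypothetical, and a single consumer is
 * assumed so the event peeked is the event consumed.
 */
extern bool wanted(void *data);	/* hypothetical caller-supplied filter */

static void __maybe_unused
example_peek_then_consume(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
	if (event && wanted(ring_buffer_event_data(event)))
		ring_buffer_consume(buffer, cpu, &ts, NULL);
}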
4931
4932 /**
 * ring_buffer_iter_dropped - report if there are dropped events
4933  * @iter: The ring buffer iterator
4934  *
4935  * Returns true if there were dropped events since the last peek.
4936  */
4937 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4938 {
4939         bool ret = iter->missed_events != 0;
4940
4941         iter->missed_events = 0;
4942         return ret;
4943 }
4944 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4945
4946 /**
4947  * ring_buffer_iter_peek - peek at the next event to be read
4948  * @iter: The ring buffer iterator
4949  * @ts: The timestamp counter of this event.
4950  *
4951  * This will return the event that will be read next, but does
4952  * not increment the iterator.
4953  */
4954 struct ring_buffer_event *
4955 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4956 {
4957         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4958         struct ring_buffer_event *event;
4959         unsigned long flags;
4960
4961  again:
4962         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4963         event = rb_iter_peek(iter, ts);
4964         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4965
4966         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4967                 goto again;
4968
4969         return event;
4970 }
4971
4972 /**
4973  * ring_buffer_consume - return an event and consume it
4974  * @buffer: The ring buffer to get the next event from
4975  * @cpu: the cpu to read the buffer from
4976  * @ts: a variable to store the timestamp (may be NULL)
4977  * @lost_events: a variable to store if events were lost (may be NULL)
4978  *
4979  * Returns the next event in the ring buffer, and that event is consumed.
4980  * Meaning that sequential reads will keep returning a different event,
4981  * and eventually empty the ring buffer if the producer is slower.
4982  */
4983 struct ring_buffer_event *
4984 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
4985                     unsigned long *lost_events)
4986 {
4987         struct ring_buffer_per_cpu *cpu_buffer;
4988         struct ring_buffer_event *event = NULL;
4989         unsigned long flags;
4990         bool dolock;
4991
4992  again:
4993         /* might be called in atomic */
4994         preempt_disable();
4995
4996         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4997                 goto out;
4998
4999         cpu_buffer = buffer->buffers[cpu];
5000         local_irq_save(flags);
5001         dolock = rb_reader_lock(cpu_buffer);
5002
5003         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5004         if (event) {
5005                 cpu_buffer->lost_events = 0;
5006                 rb_advance_reader(cpu_buffer);
5007         }
5008
5009         rb_reader_unlock(cpu_buffer, dolock);
5010         local_irq_restore(flags);
5011
5012  out:
5013         preempt_enable();
5014
5015         if (event && event->type_len == RINGBUF_TYPE_PADDING)
5016                 goto again;
5017
5018         return event;
5019 }
5020 EXPORT_SYMBOL_GPL(ring_buffer_consume);
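
/*
 * Example (illustrative sketch): a plain consuming read loop that
 * drains one CPU buffer. "process_event" is a hypothetical consumer
 * callback, not part of this file.
 */
extern void process_event(void *data, unsigned int len, u64 ts);

static void __maybe_unused
example_consume_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		if (lost)
			pr_info("lost %lu events\n", lost);
		process_event(ring_buffer_event_data(event),
			      ring_buffer_event_length(event), ts);
	}
}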
5021
5022 /**
5023  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5024  * @buffer: The ring buffer to read from
5025  * @cpu: The cpu buffer to iterate over
5026  * @flags: gfp flags to use for memory allocation
5027  *
5028  * This performs the initial preparations necessary to iterate
5029  * through the buffer.  Memory is allocated, buffer recording
5030  * is disabled, and the iterator pointer is returned to the caller.
5031  *
5032  * Disabling buffer recording prevents the reading from being
5033  * corrupted. This is not a consuming read, so a producer is not
5034  * expected.
5035  *
5036  * After a sequence of ring_buffer_read_prepare calls, the user is
5037  * expected to make at least one call to ring_buffer_read_prepare_sync.
5038  * Afterwards, ring_buffer_read_start is invoked to get things going
5039  * for real.
5040  *
5041  * This overall must be paired with ring_buffer_read_finish.
5042  */
5043 struct ring_buffer_iter *
5044 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5045 {
5046         struct ring_buffer_per_cpu *cpu_buffer;
5047         struct ring_buffer_iter *iter;
5048
5049         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5050                 return NULL;
5051
5052         iter = kzalloc(sizeof(*iter), flags);
5053         if (!iter)
5054                 return NULL;
5055
5056         /* Holds the entire event: data and meta data */
5057         iter->event_size = buffer->subbuf_size;
5058         iter->event = kmalloc(iter->event_size, flags);
5059         if (!iter->event) {
5060                 kfree(iter);
5061                 return NULL;
5062         }
5063
5064         cpu_buffer = buffer->buffers[cpu];
5065
5066         iter->cpu_buffer = cpu_buffer;
5067
5068         atomic_inc(&cpu_buffer->resize_disabled);
5069
5070         return iter;
5071 }
5072 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5073
5074 /**
5075  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5076  *
5077  * All previously invoked ring_buffer_read_prepare calls to prepare
5078  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
5079  * calls on those iterators are allowed.
5080  */
5081 void
5082 ring_buffer_read_prepare_sync(void)
5083 {
5084         synchronize_rcu();
5085 }
5086 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5087
5088 /**
5089  * ring_buffer_read_start - start a non consuming read of the buffer
5090  * @iter: The iterator returned by ring_buffer_read_prepare
5091  *
5092  * This finalizes the startup of an iteration through the buffer.
5093  * The iterator comes from a call to ring_buffer_read_prepare and
5094  * an intervening ring_buffer_read_prepare_sync must have been
5095  * performed.
5096  *
5097  * Must be paired with ring_buffer_read_finish.
5098  */
5099 void
5100 ring_buffer_read_start(struct ring_buffer_iter *iter)
5101 {
5102         struct ring_buffer_per_cpu *cpu_buffer;
5103         unsigned long flags;
5104
5105         if (!iter)
5106                 return;
5107
5108         cpu_buffer = iter->cpu_buffer;
5109
5110         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5111         arch_spin_lock(&cpu_buffer->lock);
5112         rb_iter_reset(iter);
5113         arch_spin_unlock(&cpu_buffer->lock);
5114         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5115 }
5116 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5117
5118 /**
5119  * ring_buffer_read_finish - finish reading the iterator of the buffer
5120  * @iter: The iterator retrieved by ring_buffer_read_start
5121  *
5122  * This re-enables the recording to the buffer, and frees the
5123  * iterator.
5124  */
5125 void
5126 ring_buffer_read_finish(struct ring_buffer_iter *iter)
5127 {
5128         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5129         unsigned long flags;
5130
5131         /*
5132          * Ring buffer is disabled from recording, here's a good place
5133          * to check the integrity of the ring buffer.
5134          * Must prevent readers from trying to read, as the check
5135          * clears the HEAD page and readers require it.
5136          */
5137         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5138         rb_check_pages(cpu_buffer);
5139         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5140
5141         atomic_dec(&cpu_buffer->resize_disabled);
5142         kfree(iter->event);
5143         kfree(iter);
5144 }
5145 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
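
/*
 * Example (illustrative sketch): the full non-consuming read sequence
 * described above, prepare -> sync -> start -> iterate -> finish,
 * applied to a single CPU.
 */
static void __maybe_unused
example_iterate_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
	if (!iter)
		return;
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_iter_peek(iter, &ts))) {
		/* inspect the event here, then step past it */
		ring_buffer_iter_advance(iter);
	}

	ring_buffer_read_finish(iter);
}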
5146
5147 /**
5148  * ring_buffer_iter_advance - advance the iterator to the next location
5149  * @iter: The ring buffer iterator
5150  *
5151  * Advance the iterator so that the next read returns the event
5152  * at the iterator's next location.
5153  */
5154 void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5155 {
5156         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5157         unsigned long flags;
5158
5159         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5160
5161         rb_advance_iter(iter);
5162
5163         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5164 }
5165 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5166
5167 /**
5168  * ring_buffer_size - return the size of the ring buffer (in bytes)
5169  * @buffer: The ring buffer.
5170  * @cpu: The CPU to get ring buffer size from.
5171  */
5172 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5173 {
5174         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5175                 return 0;
5176
5177         return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
5178 }
5179 EXPORT_SYMBOL_GPL(ring_buffer_size);
5180
5181 /**
5182  * ring_buffer_max_event_size - return the max data size of an event
5183  * @buffer: The ring buffer.
5184  *
5185  * Returns the maximum size an event can be.
5186  */
5187 unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
5188 {
5189         /* If abs timestamp is requested, events have a timestamp too */
5190         if (ring_buffer_time_stamp_abs(buffer))
5191                 return buffer->max_data_size - RB_LEN_TIME_EXTEND;
5192         return buffer->max_data_size;
5193 }
5194 EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
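
/*
 * Example (illustrative sketch): size-check a payload against the
 * limit above before writing it with the usual reserve/commit pair.
 * The write-side calls are the real API; the function itself is not
 * part of this file.
 */
static int __maybe_unused
example_write(struct trace_buffer *buffer, const void *data, unsigned long len)
{
	struct ring_buffer_event *event;

	if (len > ring_buffer_max_event_size(buffer))
		return -E2BIG;

	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return -EBUSY;

	memcpy(ring_buffer_event_data(event), data, len);
	ring_buffer_unlock_commit(buffer);
	return 0;
}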
5195
5196 static void rb_clear_buffer_page(struct buffer_page *page)
5197 {
5198         local_set(&page->write, 0);
5199         local_set(&page->entries, 0);
5200         rb_init_page(page->page);
5201         page->read = 0;
5202 }
5203
5204 static void
5205 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5206 {
5207         struct buffer_page *page;
5208
5209         rb_head_page_deactivate(cpu_buffer);
5210
5211         cpu_buffer->head_page
5212                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
5213         rb_clear_buffer_page(cpu_buffer->head_page);
5214         list_for_each_entry(page, cpu_buffer->pages, list) {
5215                 rb_clear_buffer_page(page);
5216         }
5217
5218         cpu_buffer->tail_page = cpu_buffer->head_page;
5219         cpu_buffer->commit_page = cpu_buffer->head_page;
5220
5221         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5222         INIT_LIST_HEAD(&cpu_buffer->new_pages);
5223         rb_clear_buffer_page(cpu_buffer->reader_page);
5224
5225         local_set(&cpu_buffer->entries_bytes, 0);
5226         local_set(&cpu_buffer->overrun, 0);
5227         local_set(&cpu_buffer->commit_overrun, 0);
5228         local_set(&cpu_buffer->dropped_events, 0);
5229         local_set(&cpu_buffer->entries, 0);
5230         local_set(&cpu_buffer->committing, 0);
5231         local_set(&cpu_buffer->commits, 0);
5232         local_set(&cpu_buffer->pages_touched, 0);
5233         local_set(&cpu_buffer->pages_lost, 0);
5234         local_set(&cpu_buffer->pages_read, 0);
5235         cpu_buffer->last_pages_touch = 0;
5236         cpu_buffer->shortest_full = 0;
5237         cpu_buffer->read = 0;
5238         cpu_buffer->read_bytes = 0;
5239
5240         rb_time_set(&cpu_buffer->write_stamp, 0);
5241         rb_time_set(&cpu_buffer->before_stamp, 0);
5242
5243         memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5244
5245         cpu_buffer->lost_events = 0;
5246         cpu_buffer->last_overrun = 0;
5247
5248         rb_head_page_activate(cpu_buffer);
5249         cpu_buffer->pages_removed = 0;
5250 }
5251
5252 /* Must have disabled the cpu buffer then done a synchronize_rcu */
5253 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5254 {
5255         unsigned long flags;
5256
5257         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5258
5259         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5260                 goto out;
5261
5262         arch_spin_lock(&cpu_buffer->lock);
5263
5264         rb_reset_cpu(cpu_buffer);
5265
5266         arch_spin_unlock(&cpu_buffer->lock);
5267
5268  out:
5269         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5270 }
5271
5272 /**
5273  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5274  * @buffer: The ring buffer to reset a per cpu buffer of
5275  * @cpu: The CPU buffer to be reset
5276  */
5277 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
5278 {
5279         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5280
5281         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5282                 return;
5283
5284         /* prevent another thread from changing buffer sizes */
5285         mutex_lock(&buffer->mutex);
5286
5287         atomic_inc(&cpu_buffer->resize_disabled);
5288         atomic_inc(&cpu_buffer->record_disabled);
5289
5290         /* Make sure all commits have finished */
5291         synchronize_rcu();
5292
5293         reset_disabled_cpu_buffer(cpu_buffer);
5294
5295         atomic_dec(&cpu_buffer->record_disabled);
5296         atomic_dec(&cpu_buffer->resize_disabled);
5297
5298         mutex_unlock(&buffer->mutex);
5299 }
5300 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
5301
5302 /* Flag to ensure proper resetting of atomic variables */
5303 #define RESET_BIT       (1 << 30)
5304
5305 /**
5306  * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5307  * @buffer: The ring buffer to reset a per cpu buffer of
5308  */
5309 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5310 {
5311         struct ring_buffer_per_cpu *cpu_buffer;
5312         int cpu;
5313
5314         /* prevent another thread from changing buffer sizes */
5315         mutex_lock(&buffer->mutex);
5316
5317         for_each_online_buffer_cpu(buffer, cpu) {
5318                 cpu_buffer = buffer->buffers[cpu];
5319
5320                 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
5321                 atomic_inc(&cpu_buffer->record_disabled);
5322         }
5323
5324         /* Make sure all commits have finished */
5325         synchronize_rcu();
5326
5327         for_each_buffer_cpu(buffer, cpu) {
5328                 cpu_buffer = buffer->buffers[cpu];
5329
5330                 /*
5331                  * If a CPU came online during the synchronize_rcu(), then
5332                  * ignore it.
5333                  */
5334                 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
5335                         continue;
5336
5337                 reset_disabled_cpu_buffer(cpu_buffer);
5338
5339                 atomic_dec(&cpu_buffer->record_disabled);
5340                 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
5341         }
5342
5343         mutex_unlock(&buffer->mutex);
5344 }
5345
5346 /**
5347  * ring_buffer_reset - reset a ring buffer
5348  * @buffer: The ring buffer to reset all cpu buffers
5349  */
5350 void ring_buffer_reset(struct trace_buffer *buffer)
5351 {
5352         struct ring_buffer_per_cpu *cpu_buffer;
5353         int cpu;
5354
5355         /* prevent another thread from changing buffer sizes */
5356         mutex_lock(&buffer->mutex);
5357
5358         for_each_buffer_cpu(buffer, cpu) {
5359                 cpu_buffer = buffer->buffers[cpu];
5360
5361                 atomic_inc(&cpu_buffer->resize_disabled);
5362                 atomic_inc(&cpu_buffer->record_disabled);
5363         }
5364
5365         /* Make sure all commits have finished */
5366         synchronize_rcu();
5367
5368         for_each_buffer_cpu(buffer, cpu) {
5369                 cpu_buffer = buffer->buffers[cpu];
5370
5371                 reset_disabled_cpu_buffer(cpu_buffer);
5372
5373                 atomic_dec(&cpu_buffer->record_disabled);
5374                 atomic_dec(&cpu_buffer->resize_disabled);
5375         }
5376
5377         mutex_unlock(&buffer->mutex);
5378 }
5379 EXPORT_SYMBOL_GPL(ring_buffer_reset);
5380
5381 /**
5382  * ring_buffer_empty - is the ring buffer empty?
5383  * @buffer: The ring buffer to test
5384  */
5385 bool ring_buffer_empty(struct trace_buffer *buffer)
5386 {
5387         struct ring_buffer_per_cpu *cpu_buffer;
5388         unsigned long flags;
5389         bool dolock;
5390         bool ret;
5391         int cpu;
5392
5393         /* yes this is racy, but if you don't like the race, lock the buffer */
5394         for_each_buffer_cpu(buffer, cpu) {
5395                 cpu_buffer = buffer->buffers[cpu];
5396                 local_irq_save(flags);
5397                 dolock = rb_reader_lock(cpu_buffer);
5398                 ret = rb_per_cpu_empty(cpu_buffer);
5399                 rb_reader_unlock(cpu_buffer, dolock);
5400                 local_irq_restore(flags);
5401
5402                 if (!ret)
5403                         return false;
5404         }
5405
5406         return true;
5407 }
5408 EXPORT_SYMBOL_GPL(ring_buffer_empty);
5409
5410 /**
5411  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5412  * @buffer: The ring buffer
5413  * @cpu: The CPU buffer to test
5414  */
5415 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
5416 {
5417         struct ring_buffer_per_cpu *cpu_buffer;
5418         unsigned long flags;
5419         bool dolock;
5420         bool ret;
5421
5422         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5423                 return true;
5424
5425         cpu_buffer = buffer->buffers[cpu];
5426         local_irq_save(flags);
5427         dolock = rb_reader_lock(cpu_buffer);
5428         ret = rb_per_cpu_empty(cpu_buffer);
5429         rb_reader_unlock(cpu_buffer, dolock);
5430         local_irq_restore(flags);
5431
5432         return ret;
5433 }
5434 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
5435
5436 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
5437 /**
5438  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5439  * @buffer_a: One buffer to swap with
5440  * @buffer_b: The other buffer to swap with
5441  * @cpu: the CPU of the buffers to swap
5442  *
5443  * This function is useful for tracers that want to take a "snapshot"
5444  * of a CPU buffer and have another backup buffer lying around.
5445  * It is expected that the tracer handles the cpu buffer not being
5446  * used at the moment.
5447  */
5448 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5449                          struct trace_buffer *buffer_b, int cpu)
5450 {
5451         struct ring_buffer_per_cpu *cpu_buffer_a;
5452         struct ring_buffer_per_cpu *cpu_buffer_b;
5453         int ret = -EINVAL;
5454
5455         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5456             !cpumask_test_cpu(cpu, buffer_b->cpumask))
5457                 goto out;
5458
5459         cpu_buffer_a = buffer_a->buffers[cpu];
5460         cpu_buffer_b = buffer_b->buffers[cpu];
5461
5462         /* At least make sure the two buffers are somewhat the same */
5463         if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5464                 goto out;
5465
5466         if (buffer_a->subbuf_order != buffer_b->subbuf_order)
5467                 goto out;
5468
5469         ret = -EAGAIN;
5470
5471         if (atomic_read(&buffer_a->record_disabled))
5472                 goto out;
5473
5474         if (atomic_read(&buffer_b->record_disabled))
5475                 goto out;
5476
5477         if (atomic_read(&cpu_buffer_a->record_disabled))
5478                 goto out;
5479
5480         if (atomic_read(&cpu_buffer_b->record_disabled))
5481                 goto out;
5482
5483         /*
5484          * We can't do a synchronize_rcu here because this
5485          * function can be called in atomic context.
5486          * Normally this will be called from the same CPU as cpu.
5487          * If not it's up to the caller to protect this.
5488          */
5489         atomic_inc(&cpu_buffer_a->record_disabled);
5490         atomic_inc(&cpu_buffer_b->record_disabled);
5491
5492         ret = -EBUSY;
5493         if (local_read(&cpu_buffer_a->committing))
5494                 goto out_dec;
5495         if (local_read(&cpu_buffer_b->committing))
5496                 goto out_dec;
5497
5498         /*
5499          * When a resize is in progress, we cannot swap it because
5500          * it will mess up the state of the cpu buffer.
5501          */
5502         if (atomic_read(&buffer_a->resizing))
5503                 goto out_dec;
5504         if (atomic_read(&buffer_b->resizing))
5505                 goto out_dec;
5506
5507         buffer_a->buffers[cpu] = cpu_buffer_b;
5508         buffer_b->buffers[cpu] = cpu_buffer_a;
5509
5510         cpu_buffer_b->buffer = buffer_a;
5511         cpu_buffer_a->buffer = buffer_b;
5512
5513         ret = 0;
5514
5515 out_dec:
5516         atomic_dec(&cpu_buffer_a->record_disabled);
5517         atomic_dec(&cpu_buffer_b->record_disabled);
5518 out:
5519         return ret;
5520 }
5521 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
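
/*
 * Example (illustrative sketch, assuming the caller keeps a spare
 * buffer of identical geometry): swap one CPU's live buffer into the
 * spare so its events can be read while tracing continues into the
 * old spare. This mirrors how the tracing snapshot code uses the
 * interface.
 */
static int __maybe_unused
example_snapshot_cpu(struct trace_buffer *live, struct trace_buffer *spare,
		     int cpu)
{
	struct ring_buffer_event *event;
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)	/* -EINVAL, -EAGAIN or -EBUSY, see above */
		return ret;

	/* The captured events are now readable from "spare" */
	while ((event = ring_buffer_consume(spare, cpu, NULL, NULL)))
		;	/* hand each event to a consumer here */

	return 0;
}
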
5522 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
5523
5524 /**
5525  * ring_buffer_alloc_read_page - allocate a page to read from buffer
5526  * @buffer: the buffer to allocate for.
5527  * @cpu: the cpu buffer to allocate.
5528  *
5529  * This function is used in conjunction with ring_buffer_read_page.
5530  * When reading a full page from the ring buffer, these functions
5531  * can be used to speed up the process. The calling function should
5532  * allocate a few pages first with this function. Then when it
5533  * needs to get pages from the ring buffer, it passes the result
5534  * of this function into ring_buffer_read_page, which will swap
5535  * the allocated page with the read page of the buffer.
5536  *
5537  * Returns:
5538  *  The page allocated, or ERR_PTR
5539  */
5540 struct buffer_data_read_page *
5541 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5542 {
5543         struct ring_buffer_per_cpu *cpu_buffer;
5544         struct buffer_data_read_page *bpage = NULL;
5545         unsigned long flags;
5546         struct page *page;
5547
5548         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5549                 return ERR_PTR(-ENODEV);
5550
5551         bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
5552         if (!bpage)
5553                 return ERR_PTR(-ENOMEM);
5554
5555         bpage->order = buffer->subbuf_order;
5556         cpu_buffer = buffer->buffers[cpu];
5557         local_irq_save(flags);
5558         arch_spin_lock(&cpu_buffer->lock);
5559
5560         if (cpu_buffer->free_page) {
5561                 bpage->data = cpu_buffer->free_page;
5562                 cpu_buffer->free_page = NULL;
5563         }
5564
5565         arch_spin_unlock(&cpu_buffer->lock);
5566         local_irq_restore(flags);
5567
5568         if (bpage->data)
5569                 goto out;
5570
5571         page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY,
5572                                 cpu_buffer->buffer->subbuf_order);
5573         if (!page) {
5574                 kfree(bpage);
5575                 return ERR_PTR(-ENOMEM);
5576         }
5577
5578         bpage->data = page_address(page);
5579
5580  out:
5581         rb_init_page(bpage->data);
5582
5583         return bpage;
5584 }
5585 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5586
5587 /**
5588  * ring_buffer_free_read_page - free an allocated read page
5589  * @buffer: the buffer the page was allocated for
5590  * @cpu: the cpu buffer the page came from
5591  * @data_page: the page to free
5592  *
5593  * Free a page allocated from ring_buffer_alloc_read_page.
5594  */
5595 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
5596                                 struct buffer_data_read_page *data_page)
5597 {
5598         struct ring_buffer_per_cpu *cpu_buffer;
5599         struct buffer_data_page *bpage = data_page->data;
5600         struct page *page = virt_to_page(bpage);
5601         unsigned long flags;
5602
5603         if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
5604                 return;
5605
5606         cpu_buffer = buffer->buffers[cpu];
5607
5608         /*
5609          * If the page is still in use someplace else, or the order of
5610          * the page is different from the sub-buffer order of the buffer,
5611          * we can't reuse it.
5612          */
5613         if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
5614                 goto out;
5615
5616         local_irq_save(flags);
5617         arch_spin_lock(&cpu_buffer->lock);
5618
5619         if (!cpu_buffer->free_page) {
5620                 cpu_buffer->free_page = bpage;
5621                 bpage = NULL;
5622         }
5623
5624         arch_spin_unlock(&cpu_buffer->lock);
5625         local_irq_restore(flags);
5626
5627  out:
5628         free_pages((unsigned long)bpage, data_page->order);
5629         kfree(data_page);
5630 }
5631 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5632
5633 /**
5634  * ring_buffer_read_page - extract a page from the ring buffer
5635  * @buffer: buffer to extract from
5636  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5637  * @len: amount to extract
5638  * @cpu: the cpu of the buffer to extract
5639  * @full: should the extraction only happen when the page is full.
5640  *
5641  * This function will pull out a page from the ring buffer and consume it.
5642  * @data_page must be the address of the variable that was returned
5643  * from ring_buffer_alloc_read_page. This is because the page might be used
5644  * to swap with a page in the ring buffer.
5645  *
5646  * for example:
5647  *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
5648  *      if (IS_ERR(rpage))
5649  *              return PTR_ERR(rpage);
5650  *      ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
5651  *      if (ret >= 0)
5652  *              process_page(ring_buffer_read_page_data(rpage), ret);
5653  *      ring_buffer_free_read_page(buffer, cpu, rpage);
5654  *
5655  * When @full is set, the function will not succeed unless
5656  * the writer is off the reader page.
5657  *
5658  * Note: it is up to the calling functions to handle sleeps and wakeups.
5659  *  The ring buffer can be used anywhere in the kernel and cannot
5660  *  blindly call wake_up. The layer that uses the ring buffer must be
5661  *  responsible for that.
5662  *
5663  * Returns:
5664  *  >=0 if data has been transferred, returns the offset of consumed data.
5665  *  <0 if no data has been transferred.
5666  */
5667 int ring_buffer_read_page(struct trace_buffer *buffer,
5668                           struct buffer_data_read_page *data_page,
5669                           size_t len, int cpu, int full)
5670 {
5671         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5672         struct ring_buffer_event *event;
5673         struct buffer_data_page *bpage;
5674         struct buffer_page *reader;
5675         unsigned long missed_events;
5676         unsigned long flags;
5677         unsigned int commit;
5678         unsigned int read;
5679         u64 save_timestamp;
5680         int ret = -1;
5681
5682         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5683                 goto out;
5684
5685         /*
5686          * If len is not big enough to hold the page header, then
5687          * we can not copy anything.
5688          * we cannot copy anything.
5689         if (len <= BUF_PAGE_HDR_SIZE)
5690                 goto out;
5691
5692         len -= BUF_PAGE_HDR_SIZE;
5693
5694         if (!data_page || !data_page->data)
5695                 goto out;
5696         if (data_page->order != buffer->subbuf_order)
5697                 goto out;
5698
5699         bpage = data_page->data;
5700         if (!bpage)
5701                 goto out;
5702
5703         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5704
5705         reader = rb_get_reader_page(cpu_buffer);
5706         if (!reader)
5707                 goto out_unlock;
5708
5709         event = rb_reader_event(cpu_buffer);
5710
5711         read = reader->read;
5712         commit = rb_page_commit(reader);
5713
5714         /* Check if any events were dropped */
5715         missed_events = cpu_buffer->lost_events;
5716
5717         /*
5718          * If this page has been partially read or
5719          * if len is not big enough to read the rest of the page or
5720          * a writer is still on the page, then
5721          * we must copy the data from the page to the buffer.
5722          * Otherwise, we can simply swap the page with the one passed in.
5723          */
5724         if (read || (len < (commit - read)) ||
5725             cpu_buffer->reader_page == cpu_buffer->commit_page) {
5726                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5727                 unsigned int rpos = read;
5728                 unsigned int pos = 0;
5729                 unsigned int size;
5730
5731                 /*
5732                  * If a full page is expected, this can still be returned
5733                  * if there's been a previous partial read and the
5734                  * rest of the page can be read and the commit page is off
5735                  * the reader page.
5736                  */
5737                 if (full &&
5738                     (!read || (len < (commit - read)) ||
5739                      cpu_buffer->reader_page == cpu_buffer->commit_page))
5740                         goto out_unlock;
5741
5742                 if (len > (commit - read))
5743                         len = (commit - read);
5744
5745                 /* Always keep the time extend and data together */
5746                 size = rb_event_ts_length(event);
5747
5748                 if (len < size)
5749                         goto out_unlock;
5750
5751                 /* save the current timestamp, since the user will need it */
5752                 save_timestamp = cpu_buffer->read_stamp;
5753
5754                 /* Need to copy one event at a time */
5755                 do {
5756                         /* We need the size of one event, because
5757                          * rb_advance_reader only advances by one event,
5758                          * whereas rb_event_ts_length may include the size of
5759                          * one or two events.
5760                          * We have already ensured there's enough space if this
5761                          * is a time extend. */
5762                         size = rb_event_length(event);
5763                         memcpy(bpage->data + pos, rpage->data + rpos, size);
5764
5765                         len -= size;
5766
5767                         rb_advance_reader(cpu_buffer);
5768                         rpos = reader->read;
5769                         pos += size;
5770
5771                         if (rpos >= commit)
5772                                 break;
5773
5774                         event = rb_reader_event(cpu_buffer);
5775                         /* Always keep the time extend and data together */
5776                         size = rb_event_ts_length(event);
5777                 } while (len >= size);
5778
5779                 /* update bpage */
5780                 local_set(&bpage->commit, pos);
5781                 bpage->time_stamp = save_timestamp;
5782
5783                 /* we copied everything to the beginning */
5784                 read = 0;
5785         } else {
5786                 /* update the entry counter */
5787                 cpu_buffer->read += rb_page_entries(reader);
5788                 cpu_buffer->read_bytes += rb_page_commit(reader);
5789
5790                 /* swap the pages */
5791                 rb_init_page(bpage);
5792                 bpage = reader->page;
5793                 reader->page = data_page->data;
5794                 local_set(&reader->write, 0);
5795                 local_set(&reader->entries, 0);
5796                 reader->read = 0;
5797                 data_page->data = bpage;
5798
5799                 /*
5800                  * Use the real_end for the data size.
5801                  * This gives us a chance to store the lost events
5802                  * on the page.
5803                  */
5804                 if (reader->real_end)
5805                         local_set(&bpage->commit, reader->real_end);
5806         }
5807         ret = read;
5808
5809         cpu_buffer->lost_events = 0;
5810
5811         commit = local_read(&bpage->commit);
5812         /*
5813          * Set a flag in the commit field if we lost events
5814          */
5815         if (missed_events) {
5816                 /* If there is room at the end of the page to save the
5817                  * missed events, then record them there.
5818                  */
5819                 if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
5820                         memcpy(&bpage->data[commit], &missed_events,
5821                                sizeof(missed_events));
5822                         local_add(RB_MISSED_STORED, &bpage->commit);
5823                         commit += sizeof(missed_events);
5824                 }
5825                 local_add(RB_MISSED_EVENTS, &bpage->commit);
5826         }
5827
5828         /*
5829          * This page may be off to user land. Zero it out here.
5830          */
5831         if (commit < buffer->subbuf_size)
5832                 memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
5833
5834  out_unlock:
5835         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5836
5837  out:
5838         return ret;
5839 }
5840 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
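
/*
 * Example (illustrative sketch): drain one CPU buffer a sub-buffer at
 * a time with the page based interface above. "export_page" is a
 * hypothetical sink; it is assumed that partial pages are acceptable
 * (@full == 0).
 */
extern void export_page(void *data, size_t len);

static int __maybe_unused
example_drain_cpu(struct trace_buffer *buffer, int cpu)
{
	struct buffer_data_read_page *rpage;
	size_t len = ring_buffer_subbuf_size_get(buffer);
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(rpage))
		return PTR_ERR(rpage);

	/* ring_buffer_read_page() returns < 0 once there is no data */
	while ((ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0)) >= 0)
		export_page(ring_buffer_read_page_data(rpage), len);

	ring_buffer_free_read_page(buffer, cpu, rpage);
	return 0;
}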
5841
5842 /**
5843  * ring_buffer_read_page_data - get pointer to the data in the page.
5844  * @page:  the page to get the data from
5845  *
5846  * Returns pointer to the actual data in this page.
5847  */
5848 void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
5849 {
5850         return page->data;
5851 }
5852 EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
5853
5854 /**
5855  * ring_buffer_subbuf_size_get - get size of the sub buffer.
5856  * @buffer: the buffer to get the sub buffer size from
5857  *
5858  * Returns size of the sub buffer, in bytes.
5859  */
5860 int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
5861 {
5862         return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
5863 }
5864 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
5865
5866 /**
5867  * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
5868  * @buffer: The ring_buffer to get the system sub page order from
5869  *
5870  * By default, one ring buffer sub page equals one system page. This parameter
5871  * is configurable, per ring buffer. The size of the ring buffer sub page can be
5872  * extended, but must be a power-of-two multiple of the system page size.
5873  *
5874  * Returns the order of buffer sub page size, in system pages:
5875  * 0 means the sub buffer size is 1 system page and so forth.
5876  * In case of an error < 0 is returned.
5877  */
5878 int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
5879 {
5880         if (!buffer)
5881                 return -EINVAL;
5882
5883         return buffer->subbuf_order;
5884 }
5885 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
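
/*
 * Example (illustrative sketch): the order <-> size arithmetic used
 * above. With 4K system pages, order 0 is a 4K sub-buffer, order 1 is
 * 8K and order 2 is 16K; BUF_PAGE_HDR_SIZE of each sub-buffer is the
 * page header, the rest holds events.
 */
static unsigned long __maybe_unused
example_subbuf_bytes(struct trace_buffer *buffer)
{
	int order = ring_buffer_subbuf_order_get(buffer);

	if (order < 0)
		return 0;
	return (1UL << order) * PAGE_SIZE;	/* total sub-buffer size */
}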
5886
5887 /**
5888  * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
5889  * @buffer: The ring_buffer to set the new page size for.
5890  * @order: Order of the system pages in one sub buffer page
5891  *
5892  * By default, one ring buffer page equals one system page. This API can be
5893  * used to set a new size for a ring buffer page. The size must be a
5894  * power-of-two number of system pages, so the input parameter @order is the
5895  * order of system pages that are allocated for one ring buffer page:
5896  *  0 - 1 system page
5897  *  1 - 2 system pages
5898  *  2 - 4 system pages
5899  *  ...
5900  *
5901  * Returns 0 on success or < 0 in case of an error.
5902  */
5903 int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
5904 {
5905         struct ring_buffer_per_cpu *cpu_buffer;
5906         struct buffer_page *bpage, *tmp;
5907         int old_order, old_size;
5908         int nr_pages;
5909         int psize;
5910         int err;
5911         int cpu;
5912
5913         if (!buffer || order < 0)
5914                 return -EINVAL;
5915
5916         if (buffer->subbuf_order == order)
5917                 return 0;
5918
5919         psize = (1 << order) * PAGE_SIZE;
5920         if (psize <= BUF_PAGE_HDR_SIZE)
5921                 return -EINVAL;
5922
5923         /* Size of a subbuf cannot be greater than the write counter */
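        /* (RB_WRITE_MASK is 20 bits: at most 1 MiB, i.e. order 8 with 4K pages) */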
5924         if (psize > RB_WRITE_MASK + 1)
5925                 return -EINVAL;
5926
5927         old_order = buffer->subbuf_order;
5928         old_size = buffer->subbuf_size;
5929
5930         /* prevent another thread from changing buffer sizes */
5931         mutex_lock(&buffer->mutex);
5932         atomic_inc(&buffer->record_disabled);
5933
5934         /* Make sure all commits have finished */
5935         synchronize_rcu();
5936
5937         buffer->subbuf_order = order;
5938         buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
5939
5940         /* Make sure all new buffers are allocated before deleting the old ones */
5941         for_each_buffer_cpu(buffer, cpu) {
5942
5943                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5944                         continue;
5945
5946                 cpu_buffer = buffer->buffers[cpu];
5947
5948                 /* Update the number of pages to match the new size */
5949                 nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
5950                 nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
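                /*
                 * E.g. moving 128 order-0 sub buffers (4096 - 16 = 4080
                 * data bytes each, assuming a 16-byte page header) to
                 * order 2 (16368 data bytes): DIV_ROUND_UP(4080 * 128,
                 * 16368) = 32, so the total data capacity is preserved,
                 * rounded up.
                 */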
5951
5952                 /* we need a minimum of two pages */
5953                 if (nr_pages < 2)
5954                         nr_pages = 2;
5955
5956                 cpu_buffer->nr_pages_to_update = nr_pages;
5957
5958                 /* Include the reader page */
5959                 nr_pages++;
5960
5961                 /* Allocate the new size buffer */
5962                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
5963                 if (__rb_allocate_pages(cpu_buffer, nr_pages,
5964                                         &cpu_buffer->new_pages)) {
5965                         /* not enough memory for new pages */
5966                         err = -ENOMEM;
5967                         goto error;
5968                 }
5969         }
5970
5971         for_each_buffer_cpu(buffer, cpu) {
5972
5973                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5974                         continue;
5975
5976                 cpu_buffer = buffer->buffers[cpu];
5977
5978                 /* Clear the head bit so the linked list can be walked normally */
5979                 rb_head_page_deactivate(cpu_buffer);
5980
5981                 /* Now walk the list and free all the old sub buffers */
5982                 list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
5983                         list_del_init(&bpage->list);
5984                         free_buffer_page(bpage);
5985                 }
5986                 /* The above loop stopped at the last page needing to be freed */
5987                 bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
5988                 free_buffer_page(bpage);
5989
5990                 /* Free the current reader page */
5991                 free_buffer_page(cpu_buffer->reader_page);
5992
5993                 /* One page was allocated for the reader page */
5994                 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
5995                                                      struct buffer_page, list);
5996                 list_del_init(&cpu_buffer->reader_page->list);
5997
5998                 /* The cpu_buffer pages are a linked list with no head */
5999                 cpu_buffer->pages = cpu_buffer->new_pages.next;
6000                 cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
6001                 cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
6002
6003                 /* Clear the new_pages list */
6004                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
6005
6006                 cpu_buffer->head_page
6007                         = list_entry(cpu_buffer->pages, struct buffer_page, list);
6008                 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
6009
6010                 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
6011                 cpu_buffer->nr_pages_to_update = 0;
6012
6013                 free_pages((unsigned long)cpu_buffer->free_page, old_order);
6014                 cpu_buffer->free_page = NULL;
6015
6016                 rb_head_page_activate(cpu_buffer);
6017
6018                 rb_check_pages(cpu_buffer);
6019         }
6020
6021         atomic_dec(&buffer->record_disabled);
6022         mutex_unlock(&buffer->mutex);
6023
6024         return 0;
6025
6026 error:
6027         buffer->subbuf_order = old_order;
6028         buffer->subbuf_size = old_size;
6029
6030         atomic_dec(&buffer->record_disabled);
6031         mutex_unlock(&buffer->mutex);
6032
6033         for_each_buffer_cpu(buffer, cpu) {
6034                 cpu_buffer = buffer->buffers[cpu];
6035
6036                 if (!cpu_buffer->nr_pages_to_update)
6037                         continue;
6038
6039                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
6040                         list_del_init(&bpage->list);
6041                         free_buffer_page(bpage);
6042                 }
6043         }
6044
6045         return err;
6046 }
6047 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
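
/*
 * A usage sketch (error handling elided, @buffer assumed valid): switch
 * to 4 system pages per sub buffer, then restore the previous order:
 *
 *	int old_order = ring_buffer_subbuf_order_get(buffer);
 *
 *	if (ring_buffer_subbuf_order_set(buffer, 2) < 0)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_subbuf_order_set(buffer, old_order);
 *
 * The tracefs buffer_subbuf_size_kb knob is the in-tree user of this
 * interface.
 */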
6048
6049 /*
6050  * We only allocate new buffers, never free them if the CPU goes down.
6051  * If we were to free the buffer, then the user would lose any trace that was in
6052  * the buffer.
6053  */
6054 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
6055 {
6056         struct trace_buffer *buffer;
6057         long nr_pages_same;
6058         int cpu_i;
6059         unsigned long nr_pages;
6060
6061         buffer = container_of(node, struct trace_buffer, node);
6062         if (cpumask_test_cpu(cpu, buffer->cpumask))
6063                 return 0;
6064
6065         nr_pages = 0;
6066         nr_pages_same = 1;
6067         /* check if all cpu sizes are the same */
6068         for_each_buffer_cpu(buffer, cpu_i) {
6069                 /* fill in the size from the first enabled cpu */
6070                 if (nr_pages == 0)
6071                         nr_pages = buffer->buffers[cpu_i]->nr_pages;
6072                 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
6073                         nr_pages_same = 0;
6074                         break;
6075                 }
6076         }
6077         /* allocate minimum pages, user can later expand it */
6078         if (!nr_pages_same)
6079                 nr_pages = 2;
6080         buffer->buffers[cpu] =
6081                 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
6082         if (!buffer->buffers[cpu]) {
6083                 WARN(1, "failed to allocate ring buffer on CPU %u\n",
6084                      cpu);
6085                 return -ENOMEM;
6086         }
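        /* Make sure the new buffer is seen before the cpumask bit is set */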
6087         smp_wmb();
6088         cpumask_set_cpu(cpu, buffer->cpumask);
6089         return 0;
6090 }
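
/*
 * This callback is wired up by the tracing core through the CPU hotplug
 * state machine; roughly (see trace.c for the actual registration):
 *
 *	cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:prepare",
 *				trace_rb_cpu_prepare, NULL);
 *
 * and each ring buffer adds itself as an instance with
 * cpuhp_state_add_instance() on its @node at allocation time.
 */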
6091
6092 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
6093 /*
6094  * This is a basic integrity check of the ring buffer.
6095  * Late in the boot cycle this test will run when configured in.
6096  * It will kick off a thread per CPU that will go into a loop
6097  * writing to the per cpu ring buffer various sizes of data.
6098  * Some of the data will be large items, some small.
6099  *
6100  * Another thread is created that goes into a spin, sending out
6101  * IPIs to the other CPUs to also write into the ring buffer.
6102  * This is to test the nesting ability of the buffer.
6103  *
6104  * Basic stats are recorded and reported. If something unexpected
6105  * happens in the ring buffer, a big warning is displayed and all
6106  * ring buffers are disabled.
6107  */
6108 static struct task_struct *rb_threads[NR_CPUS] __initdata;
6109
6110 struct rb_test_data {
6111         struct trace_buffer *buffer;
6112         unsigned long           events;
6113         unsigned long           bytes_written;
6114         unsigned long           bytes_alloc;
6115         unsigned long           bytes_dropped;
6116         unsigned long           events_nested;
6117         unsigned long           bytes_written_nested;
6118         unsigned long           bytes_alloc_nested;
6119         unsigned long           bytes_dropped_nested;
6120         int                     min_size_nested;
6121         int                     max_size_nested;
6122         int                     max_size;
6123         int                     min_size;
6124         int                     cpu;
6125         int                     cnt;
6126 };
6127
6128 static struct rb_test_data rb_data[NR_CPUS] __initdata;
6129
6130 /* 1 meg per cpu */
6131 #define RB_TEST_BUFFER_SIZE     1048576
6132
6133 static char rb_string[] __initdata =
6134         "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
6135         "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
6136         "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
6137
6138 static bool rb_test_started __initdata;
6139
6140 struct rb_item {
6141         int size;
6142         char str[];
6143 };
6144
6145 static __init int rb_write_something(struct rb_test_data *data, bool nested)
6146 {
6147         struct ring_buffer_event *event;
6148         struct rb_item *item;
6149         bool started;
6150         int event_len;
6151         int size;
6152         int len;
6153         int cnt;
6154
6155         /* Have nested writes differ from what is normally written */
6156         cnt = data->cnt + (nested ? 27 : 0);
6157
6158         /* Multiply cnt by ~e (68/25 ~= 2.72) to make some unique increment */
6159         size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
6160
6161         len = size + sizeof(struct rb_item);
6162
6163         started = rb_test_started;
6164         /* read rb_test_started before checking buffer enabled */
6165         smp_rmb();
6166
6167         event = ring_buffer_lock_reserve(data->buffer, len);
6168         if (!event) {
6169                 /* Ignore dropped events before test starts. */
6170                 if (started) {
6171                         if (nested)
6172                                 data->bytes_dropped_nested += len;
6173                         else
6174                                 data->bytes_dropped += len;
6175                 }
6176                 return len;
6177         }
6178
6179         event_len = ring_buffer_event_length(event);
6180
6181         if (RB_WARN_ON(data->buffer, event_len < len))
6182                 goto out;
6183
6184         item = ring_buffer_event_data(event);
6185         item->size = size;
6186         memcpy(item->str, rb_string, size);
6187
6188         if (nested) {
6189                 data->bytes_alloc_nested += event_len;
6190                 data->bytes_written_nested += len;
6191                 data->events_nested++;
6192                 if (!data->min_size_nested || len < data->min_size_nested)
6193                         data->min_size_nested = len;
6194                 if (len > data->max_size_nested)
6195                         data->max_size_nested = len;
6196         } else {
6197                 data->bytes_alloc += event_len;
6198                 data->bytes_written += len;
6199                 data->events++;
6200                 if (!data->min_size || len < data->min_size)
6201                         data->min_size = len;
6202                 if (len > data->max_size)
6203                         data->max_size = len;
6204         }
6205
6206  out:
6207         ring_buffer_unlock_commit(data->buffer);
6208
6209         return 0;
6210 }
6211
6212 static __init int rb_test(void *arg)
6213 {
6214         struct rb_test_data *data = arg;
6215
6216         while (!kthread_should_stop()) {
6217                 rb_write_something(data, false);
6218                 data->cnt++;
6219
6220                 set_current_state(TASK_INTERRUPTIBLE);
6221                 /* Now sleep between a min of 100-300us (varies with cnt) and a max of 1ms */
6222                 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
6223         }
6224
6225         return 0;
6226 }
6227
6228 static __init void rb_ipi(void *ignore)
6229 {
6230         struct rb_test_data *data;
6231         int cpu = smp_processor_id();
6232
6233         data = &rb_data[cpu];
6234         rb_write_something(data, true);
6235 }
6236
6237 static __init int rb_hammer_test(void *arg)
6238 {
6239         while (!kthread_should_stop()) {
6240
6241                 /* Send an IPI to all cpus to write data! */
6242                 smp_call_function(rb_ipi, NULL, 1);
6243                 /* No sleep, but on non-preempt kernels, let others run */
6244                 schedule();
6245         }
6246
6247         return 0;
6248 }
6249
6250 static __init int test_ringbuffer(void)
6251 {
6252         struct task_struct *rb_hammer;
6253         struct trace_buffer *buffer;
6254         int cpu;
6255         int ret = 0;
6256
6257         if (security_locked_down(LOCKDOWN_TRACEFS)) {
6258                 pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
6259                 return 0;
6260         }
6261
6262         pr_info("Running ring buffer tests...\n");
6263
6264         buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
6265         if (WARN_ON(!buffer))
6266                 return 0;
6267
6268         /* Disable buffer so that threads can't write to it yet */
6269         ring_buffer_record_off(buffer);
6270
6271         for_each_online_cpu(cpu) {
6272                 rb_data[cpu].buffer = buffer;
6273                 rb_data[cpu].cpu = cpu;
6274                 rb_data[cpu].cnt = cpu;
6275                 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
6276                                                      cpu, "rbtester/%u");
6277                 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
6278                         pr_cont("FAILED\n");
6279                         ret = PTR_ERR(rb_threads[cpu]);
6280                         goto out_free;
6281                 }
6282         }
6283
6284         /* Now create the rb hammer! */
6285         rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
6286         if (WARN_ON(IS_ERR(rb_hammer))) {
6287                 pr_cont("FAILED\n");
6288                 ret = PTR_ERR(rb_hammer);
6289                 goto out_free;
6290         }
6291
6292         ring_buffer_record_on(buffer);
6293         /*
6294          * Show that the buffer is enabled before setting rb_test_started.
6295          * Yes, there's a small race window where events could be
6296          * dropped and the thread won't catch it. But when a ring
6297          * buffer gets enabled, there will always be some kind of
6298          * delay before other CPUs see it. Thus, we don't care about
6299          * those dropped events. We care about events dropped after
6300          * the threads see that the buffer is active.
6301          */
6302         smp_wmb();
6303         rb_test_started = true;
6304
6305         set_current_state(TASK_INTERRUPTIBLE);
6306         /* Just run for 10 seconds */
6307         schedule_timeout(10 * HZ);
6308
6309         kthread_stop(rb_hammer);
6310
6311  out_free:
6312         for_each_online_cpu(cpu) {
6313                 if (!rb_threads[cpu])
6314                         break;
6315                 kthread_stop(rb_threads[cpu]);
6316         }
6317         if (ret) {
6318                 ring_buffer_free(buffer);
6319                 return ret;
6320         }
6321
6322         /* Report! */
6323         pr_info("finished\n");
6324         for_each_online_cpu(cpu) {
6325                 struct ring_buffer_event *event;
6326                 struct rb_test_data *data = &rb_data[cpu];
6327                 struct rb_item *item;
6328                 unsigned long total_events;
6329                 unsigned long total_dropped;
6330                 unsigned long total_written;
6331                 unsigned long total_alloc;
6332                 unsigned long total_read = 0;
6333                 unsigned long total_size = 0;
6334                 unsigned long total_len = 0;
6335                 unsigned long total_lost = 0;
6336                 unsigned long lost;
6337                 int big_event_size;
6338                 int small_event_size;
6339
6340                 ret = -1;
6341
6342                 total_events = data->events + data->events_nested;
6343                 total_written = data->bytes_written + data->bytes_written_nested;
6344                 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
6345                 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
6346
6347                 big_event_size = data->max_size + data->max_size_nested;
6348                 small_event_size = data->min_size + data->min_size_nested;
6349
6350                 pr_info("CPU %d:\n", cpu);
6351                 pr_info("              events:    %ld\n", total_events);
6352                 pr_info("       dropped bytes:    %ld\n", total_dropped);
6353                 pr_info("       alloced bytes:    %ld\n", total_alloc);
6354                 pr_info("       written bytes:    %ld\n", total_written);
6355                 pr_info("       biggest event:    %d\n", big_event_size);
6356                 pr_info("      smallest event:    %d\n", small_event_size);
6357
6358                 if (RB_WARN_ON(buffer, total_dropped))
6359                         break;
6360
6361                 ret = 0;
6362
6363                 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
6364                         total_lost += lost;
6365                         item = ring_buffer_event_data(event);
6366                         total_len += ring_buffer_event_length(event);
6367                         total_size += item->size + sizeof(struct rb_item);
6368                         if (memcmp(&item->str[0], rb_string, item->size) != 0) {
6369                                 pr_info("FAILED!\n");
6370                                 pr_info("buffer had: %.*s\n", item->size, item->str);
6371                                 pr_info("expected:   %.*s\n", item->size, rb_string);
6372                                 RB_WARN_ON(buffer, 1);
6373                                 ret = -1;
6374                                 break;
6375                         }
6376                         total_read++;
6377                 }
6378                 if (ret)
6379                         break;
6380
6381                 ret = -1;
6382
6383                 pr_info("         read events:   %ld\n", total_read);
6384                 pr_info("         lost events:   %ld\n", total_lost);
6385                 pr_info("        total events:   %ld\n", total_lost + total_read);
6386                 pr_info("  recorded len bytes:   %ld\n", total_len);
6387                 pr_info(" recorded size bytes:   %ld\n", total_size);
6388                 if (total_lost) {
6389                         pr_info(" With dropped events, record len and size may not match\n"
6390                                 " alloced and written from above\n");
6391                 } else {
6392                         if (RB_WARN_ON(buffer, total_len != total_alloc ||
6393                                        total_size != total_written))
6394                                 break;
6395                 }
6396                 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6397                         break;
6398
6399                 ret = 0;
6400         }
6401         if (!ret)
6402                 pr_info("Ring buffer PASSED!\n");
6403
6404         ring_buffer_free(buffer);
6405         return 0;
6406 }
6407
6408 late_initcall(test_ringbuffer);
6409 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */