1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic ring buffer
4  *
5  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
6  */
7 #include <linux/trace_events.h>
8 #include <linux/ring_buffer.h>
9 #include <linux/trace_clock.h>
10 #include <linux/sched/clock.h>
11 #include <linux/trace_seq.h>
12 #include <linux/spinlock.h>
13 #include <linux/irq_work.h>
14 #include <linux/security.h>
15 #include <linux/uaccess.h>
16 #include <linux/hardirq.h>
17 #include <linux/kthread.h>      /* for self test */
18 #include <linux/module.h>
19 #include <linux/percpu.h>
20 #include <linux/mutex.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/list.h>
26 #include <linux/cpu.h>
27 #include <linux/oom.h>
28
29 #include <asm/local.h>
30
31 static void update_pages_handler(struct work_struct *work);
32
33 /*
34  * The ring buffer header is special. We must keep it up to date manually.
35  */
36 int ring_buffer_print_entry_header(struct trace_seq *s)
37 {
38         trace_seq_puts(s, "# compressed entry header\n");
39         trace_seq_puts(s, "\ttype_len    :    5 bits\n");
40         trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
41         trace_seq_puts(s, "\tarray       :   32 bits\n");
42         trace_seq_putc(s, '\n');
43         trace_seq_printf(s, "\tpadding     : type == %d\n",
44                          RINGBUF_TYPE_PADDING);
45         trace_seq_printf(s, "\ttime_extend : type == %d\n",
46                          RINGBUF_TYPE_TIME_EXTEND);
47         trace_seq_printf(s, "\ttime_stamp : type == %d\n",
48                          RINGBUF_TYPE_TIME_STAMP);
49         trace_seq_printf(s, "\tdata max type_len  == %d\n",
50                          RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
51
52         return !trace_seq_has_overflowed(s);
53 }
54
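/*
 * For reference, the header printed above corresponds to the event
 * layout declared in <linux/ring_buffer.h> (shown here only as an
 * illustration):
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *
 * A small data event stores its payload length divided by RB_ALIGNMENT
 * in type_len and keeps its payload starting at array[0]; a larger
 * event stores type_len == 0, the byte length in array[0], and the
 * payload starting at array[1] (see rb_event_data_length() below).
 */
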
55 /*
56  * The ring buffer is made up of a list of pages. A separate list of pages is
57  * allocated for each CPU. A writer may only write to a buffer that is
58  * associated with the CPU it is currently executing on.  A reader may read
59  * from any per cpu buffer.
60  *
61  * The reader is special. For each per cpu buffer, the reader has its own
62  * reader page. When a reader has read the entire reader page, this reader
63  * page is swapped with another page in the ring buffer.
64  *
65  * Now, as long as the writer is off the reader page, the reader can do
66  * whatever it wants with that page. The writer will never write to that page
67  * again (as long as it is out of the ring buffer).
68  *
69  * Here's some silly ASCII art.
70  *
71  *   +------+
72  *   |reader|          RING BUFFER
73  *   |page  |
74  *   +------+        +---+   +---+   +---+
75  *                   |   |-->|   |-->|   |
76  *                   +---+   +---+   +---+
77  *                     ^               |
78  *                     |               |
79  *                     +---------------+
80  *
81  *
82  *   +------+
83  *   |reader|          RING BUFFER
84  *   |page  |------------------v
85  *   +------+        +---+   +---+   +---+
86  *                   |   |-->|   |-->|   |
87  *                   +---+   +---+   +---+
88  *                     ^               |
89  *                     |               |
90  *                     +---------------+
91  *
92  *
93  *   +------+
94  *   |reader|          RING BUFFER
95  *   |page  |------------------v
96  *   +------+        +---+   +---+   +---+
97  *      ^            |   |-->|   |-->|   |
98  *      |            +---+   +---+   +---+
99  *      |                              |
100  *      |                              |
101  *      +------------------------------+
102  *
103  *
104  *   +------+
105  *   |buffer|          RING BUFFER
106  *   |page  |------------------v
107  *   +------+        +---+   +---+   +---+
108  *      ^            |   |   |   |-->|   |
109  *      |   New      +---+   +---+   +---+
110  *      |  Reader------^               |
111  *      |   page                       |
112  *      +------------------------------+
113  *
114  *
115  * After we make this swap, the reader can hand this page off to the splice
116  * code and be done with it. It can even allocate a new page if it needs to
117  * and swap that into the ring buffer.
118  *
119  * We will be using cmpxchg soon to make all this lockless.
120  *
121  */
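
/*
 * For reference, a minimal consumer sketch (a hypothetical caller;
 * handle() and copy_out() are placeholders). Events can be consumed one
 * at a time, or a whole reader page can be pulled out for the splice
 * path, using the API declared in <linux/ring_buffer.h>:
 *
 *	struct ring_buffer_event *event;
 *	void *page;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
 *		handle(ring_buffer_event_data(event), ts);
 *
 *	page = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!IS_ERR(page)) {
 *		if (ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0) >= 0)
 *			copy_out(page);
 *		ring_buffer_free_read_page(buffer, cpu, page);
 *	}
 */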
122
123 /* Used for individual buffers (after the counter) */
124 #define RB_BUFFER_OFF           (1 << 20)
125
126 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
127
128 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
129 #define RB_ALIGNMENT            4U
130 #define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
131 #define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
132 #define RB_ALIGN_DATA           __aligned(RB_ALIGNMENT)
133
134 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
135 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
136
137 enum {
138         RB_LEN_TIME_EXTEND = 8,
139         RB_LEN_TIME_STAMP =  8,
140 };
141
142 #define skip_time_extend(event) \
143         ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
144
145 #define extended_time(event) \
146         (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
147
148 static inline int rb_null_event(struct ring_buffer_event *event)
149 {
150         return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
151 }
152
153 static void rb_event_set_padding(struct ring_buffer_event *event)
154 {
155         /* padding has a NULL time_delta */
156         event->type_len = RINGBUF_TYPE_PADDING;
157         event->time_delta = 0;
158 }
159
160 static unsigned
161 rb_event_data_length(struct ring_buffer_event *event)
162 {
163         unsigned length;
164
165         if (event->type_len)
166                 length = event->type_len * RB_ALIGNMENT;
167         else
168                 length = event->array[0];
169         return length + RB_EVNT_HDR_SIZE;
170 }
171
172 /*
173  * Return the length of the given event. Will return
174  * the length of the time extend if the event is a
175  * time extend.
176  */
177 static inline unsigned
178 rb_event_length(struct ring_buffer_event *event)
179 {
180         switch (event->type_len) {
181         case RINGBUF_TYPE_PADDING:
182                 if (rb_null_event(event))
183                         /* undefined */
184                         return -1;
185                 return  event->array[0] + RB_EVNT_HDR_SIZE;
186
187         case RINGBUF_TYPE_TIME_EXTEND:
188                 return RB_LEN_TIME_EXTEND;
189
190         case RINGBUF_TYPE_TIME_STAMP:
191                 return RB_LEN_TIME_STAMP;
192
193         case RINGBUF_TYPE_DATA:
194                 return rb_event_data_length(event);
195         default:
196                 WARN_ON_ONCE(1);
197         }
198         /* not hit */
199         return 0;
200 }
201
202 /*
203  * Return total length of time extend and data,
204  *   or just the event length for all other events.
205  */
206 static inline unsigned
207 rb_event_ts_length(struct ring_buffer_event *event)
208 {
209         unsigned len = 0;
210
211         if (extended_time(event)) {
212                 /* time extends include the data event after it */
213                 len = RB_LEN_TIME_EXTEND;
214                 event = skip_time_extend(event);
215         }
216         return len + rb_event_length(event);
217 }
218
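/*
 * Layout sketch (illustrative only): when an event is preceded by a
 * time extend, the extend occupies RB_LEN_TIME_EXTEND (8) bytes and the
 * data event follows immediately, so:
 *
 *	rb_event_ts_length(extend) == 8 + rb_event_length(data event)
 *	skip_time_extend(extend)   == the data event itself
 *
 * while ring_buffer_event_length() below transparently skips the extend
 * and reports only the data payload.
 */
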
219 /**
220  * ring_buffer_event_length - return the length of the event
221  * @event: the event to get the length of
222  *
223  * Returns the size of the data load of a data event.
224  * If the event is something other than a data event, it
225  * returns the size of the event itself. With the exception
226  * of a TIME EXTEND, where it still returns the size of the
227  * data load of the data event after it.
228  */
229 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
230 {
231         unsigned length;
232
233         if (extended_time(event))
234                 event = skip_time_extend(event);
235
236         length = rb_event_length(event);
237         if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
238                 return length;
239         length -= RB_EVNT_HDR_SIZE;
240         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
241                 length -= sizeof(event->array[0]);
242         return length;
243 }
244 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
245
246 /* inline for ring buffer fast paths */
247 static __always_inline void *
248 rb_event_data(struct ring_buffer_event *event)
249 {
250         if (extended_time(event))
251                 event = skip_time_extend(event);
252         WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
253         /* If length is in len field, then array[0] has the data */
254         if (event->type_len)
255                 return (void *)&event->array[0];
256         /* Otherwise length is in array[0] and array[1] has the data */
257         return (void *)&event->array[1];
258 }
259
260 /**
261  * ring_buffer_event_data - return the data of the event
262  * @event: the event to get the data from
263  */
264 void *ring_buffer_event_data(struct ring_buffer_event *event)
265 {
266         return rb_event_data(event);
267 }
268 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
269
270 #define for_each_buffer_cpu(buffer, cpu)                \
271         for_each_cpu(cpu, buffer->cpumask)
272
273 #define for_each_online_buffer_cpu(buffer, cpu)         \
274         for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
275
276 #define TS_SHIFT        27
277 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
278 #define TS_DELTA_TEST   (~TS_MASK)
279
280 /**
281  * ring_buffer_event_time_stamp - return the event's extended timestamp
282  * @event: the event to get the timestamp of
283  *
284  * Returns the extended timestamp associated with a data event.
285  * An extended time_stamp is a 64-bit timestamp represented
286  * internally in a special way that makes the best use of space
287  * contained within a ring buffer event.  This function decodes
288  * it and maps it to a straight u64 value.
289  */
290 u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
291 {
292         u64 ts;
293
294         ts = event->array[0];
295         ts <<= TS_SHIFT;
296         ts += event->time_delta;
297
298         return ts;
299 }
300
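/*
 * Worked example (illustrative only): with TS_SHIFT == 27, an event
 * carrying array[0] == 2 and time_delta == 5 decodes to
 *
 *	ts = (2 << 27) + 5 = 268435461
 *
 * i.e. the low 27 bits of the time stamp travel in time_delta and the
 * remaining bits in array[0].
 */
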
301 /* Flag when events were overwritten */
302 #define RB_MISSED_EVENTS        (1 << 31)
303 /* Missed count stored at end */
304 #define RB_MISSED_STORED        (1 << 30)
305
306 struct buffer_data_page {
307         u64              time_stamp;    /* page time stamp */
308         local_t          commit;        /* write committed index */
309         unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
310 };
311
312 /*
313  * Note, the buffer_page list must be first. The buffer pages
314  * are allocated in cache lines, which means that each buffer
315  * page will be at the beginning of a cache line, and thus
316  * the least significant bits will be zero. We use this to
317  * add flags in the list struct pointers, to make the ring buffer
318  * lockless.
319  */
320 struct buffer_page {
321         struct list_head list;          /* list of buffer pages */
322         local_t          write;         /* index for next write */
323         unsigned         read;          /* index for next read */
324         local_t          entries;       /* entries on this page */
325         unsigned long    real_end;      /* real end of data */
326         struct buffer_data_page *page;  /* Actual data page */
327 };
328
329 /*
330  * The buffer page counters, write and entries, must be reset
331  * atomically when crossing page boundaries. To synchronize this
332  * update, two counters are inserted into the number. One is
333  * the actual counter for the write position or count on the page.
334  *
335  * The other is a counter of updaters. Before an update happens
336  * the update partition of the counter is incremented. This will
337  * allow the updater to update the counter atomically.
338  *
339  * The counter is 20 bits, and the state data is 12.
340  */
341 #define RB_WRITE_MASK           0xfffff
342 #define RB_WRITE_INTCNT         (1 << 20)
343
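/*
 * Worked example (illustrative only): if a page's write field holds
 * 0x00300014, the low 20 bits give the write index and the upper bits
 * count in-flight updaters:
 *
 *	index    = val & RB_WRITE_MASK;		// 0x14 == 20
 *	updaters = val >> 20;			// 3
 *
 * An updater claims the page with
 * local_add_return(RB_WRITE_INTCNT, &bpage->write), which bumps only
 * the updater count (see rb_tail_page_update() below).
 */
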
344 static void rb_init_page(struct buffer_data_page *bpage)
345 {
346         local_set(&bpage->commit, 0);
347 }
348
349 /*
350  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
351  * this issue out.
352  */
353 static void free_buffer_page(struct buffer_page *bpage)
354 {
355         free_page((unsigned long)bpage->page);
356         kfree(bpage);
357 }
358
359 /*
360  * We need to fit the time_stamp delta into 27 bits.
361  */
362 static inline int test_time_stamp(u64 delta)
363 {
364         if (delta & TS_DELTA_TEST)
365                 return 1;
366         return 0;
367 }
368
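/*
 * Rough scale (assuming a nanosecond resolution trace clock): 27 bits
 * of delta cover 2^27 ns, about 134 ms, so two events further apart
 * than that need a time extend (or an absolute time stamp) instead of
 * a plain delta.
 */
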
369 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
370
371 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
372 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
373
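/*
 * Worked numbers (assuming 4K pages and a 64-bit local_t): the
 * buffer_data_page header is 8 (time_stamp) + 8 (commit) = 16 bytes,
 * so BUF_PAGE_SIZE is 4096 - 16 = 4080 and BUF_MAX_DATA_SIZE, the
 * largest single event payload, is 4080 - 8 = 4072 bytes.
 */
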
374 int ring_buffer_print_page_header(struct trace_seq *s)
375 {
376         struct buffer_data_page field;
377
378         trace_seq_printf(s, "\tfield: u64 timestamp;\t"
379                          "offset:0;\tsize:%u;\tsigned:%u;\n",
380                          (unsigned int)sizeof(field.time_stamp),
381                          (unsigned int)is_signed_type(u64));
382
383         trace_seq_printf(s, "\tfield: local_t commit;\t"
384                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
385                          (unsigned int)offsetof(typeof(field), commit),
386                          (unsigned int)sizeof(field.commit),
387                          (unsigned int)is_signed_type(long));
388
389         trace_seq_printf(s, "\tfield: int overwrite;\t"
390                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
391                          (unsigned int)offsetof(typeof(field), commit),
392                          1,
393                          (unsigned int)is_signed_type(long));
394
395         trace_seq_printf(s, "\tfield: char data;\t"
396                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
397                          (unsigned int)offsetof(typeof(field), data),
398                          (unsigned int)BUF_PAGE_SIZE,
399                          (unsigned int)is_signed_type(char));
400
401         return !trace_seq_has_overflowed(s);
402 }
403
404 struct rb_irq_work {
405         struct irq_work                 work;
406         wait_queue_head_t               waiters;
407         wait_queue_head_t               full_waiters;
408         bool                            waiters_pending;
409         bool                            full_waiters_pending;
410         bool                            wakeup_full;
411 };
412
413 /*
414  * Structure to hold event state and handle nested events.
415  */
416 struct rb_event_info {
417         u64                     ts;
418         u64                     delta;
419         u64                     before;
420         u64                     after;
421         unsigned long           length;
422         struct buffer_page      *tail_page;
423         int                     add_timestamp;
424 };
425
426 /*
427  * Used for the add_timestamp
428  *  NONE
429  *  EXTEND - wants a time extend
430  *  ABSOLUTE - the buffer requests all events to have absolute time stamps
431  *  FORCE - force a full time stamp.
432  */
433 enum {
434         RB_ADD_STAMP_NONE               = 0,
435         RB_ADD_STAMP_EXTEND             = BIT(1),
436         RB_ADD_STAMP_ABSOLUTE           = BIT(2),
437         RB_ADD_STAMP_FORCE              = BIT(3)
438 };
439 /*
440  * Used for which event context the event is in.
441  *  NMI     = 0
442  *  IRQ     = 1
443  *  SOFTIRQ = 2
444  *  NORMAL  = 3
445  *
446  * See trace_recursive_lock() comment below for more details.
447  */
448 enum {
449         RB_CTX_NMI,
450         RB_CTX_IRQ,
451         RB_CTX_SOFTIRQ,
452         RB_CTX_NORMAL,
453         RB_CTX_MAX
454 };
455
456 #if BITS_PER_LONG == 32
457 #define RB_TIME_32
458 #endif
459
460 /* To test on 64 bit machines */
461 //#define RB_TIME_32
462
463 #ifdef RB_TIME_32
464
465 struct rb_time_struct {
466         local_t         cnt;
467         local_t         top;
468         local_t         bottom;
469 };
470 #else
471 #include <asm/local64.h>
472 struct rb_time_struct {
473         local64_t       time;
474 };
475 #endif
476 typedef struct rb_time_struct rb_time_t;
477
478 /*
479  * head_page == tail_page && head == tail then buffer is empty.
480  */
481 struct ring_buffer_per_cpu {
482         int                             cpu;
483         atomic_t                        record_disabled;
484         atomic_t                        resize_disabled;
485         struct trace_buffer     *buffer;
486         raw_spinlock_t                  reader_lock;    /* serialize readers */
487         arch_spinlock_t                 lock;
488         struct lock_class_key           lock_key;
489         struct buffer_data_page         *free_page;
490         unsigned long                   nr_pages;
491         unsigned int                    current_context;
492         struct list_head                *pages;
493         struct buffer_page              *head_page;     /* read from head */
494         struct buffer_page              *tail_page;     /* write to tail */
495         struct buffer_page              *commit_page;   /* committed pages */
496         struct buffer_page              *reader_page;
497         unsigned long                   lost_events;
498         unsigned long                   last_overrun;
499         unsigned long                   nest;
500         local_t                         entries_bytes;
501         local_t                         entries;
502         local_t                         overrun;
503         local_t                         commit_overrun;
504         local_t                         dropped_events;
505         local_t                         committing;
506         local_t                         commits;
507         local_t                         pages_touched;
508         local_t                         pages_read;
509         long                            last_pages_touch;
510         size_t                          shortest_full;
511         unsigned long                   read;
512         unsigned long                   read_bytes;
513         rb_time_t                       write_stamp;
514         rb_time_t                       before_stamp;
515         u64                             read_stamp;
516         /* ring buffer pages to update, > 0 to add, < 0 to remove */
517         long                            nr_pages_to_update;
518         struct list_head                new_pages; /* new pages to add */
519         struct work_struct              update_pages_work;
520         struct completion               update_done;
521
522         struct rb_irq_work              irq_work;
523 };
524
525 struct trace_buffer {
526         unsigned                        flags;
527         int                             cpus;
528         atomic_t                        record_disabled;
529         cpumask_var_t                   cpumask;
530
531         struct lock_class_key           *reader_lock_key;
532
533         struct mutex                    mutex;
534
535         struct ring_buffer_per_cpu      **buffers;
536
537         struct hlist_node               node;
538         u64                             (*clock)(void);
539
540         struct rb_irq_work              irq_work;
541         bool                            time_stamp_abs;
542 };
543
544 struct ring_buffer_iter {
545         struct ring_buffer_per_cpu      *cpu_buffer;
546         unsigned long                   head;
547         unsigned long                   next_event;
548         struct buffer_page              *head_page;
549         struct buffer_page              *cache_reader_page;
550         unsigned long                   cache_read;
551         u64                             read_stamp;
552         u64                             page_stamp;
553         struct ring_buffer_event        *event;
554         int                             missed_events;
555 };
556
557 #ifdef RB_TIME_32
558
559 /*
560  * On 32 bit machines, local64_t is very expensive. As the ring
561  * buffer doesn't need all the features of a true 64 bit atomic,
562  * on 32 bit, it uses these functions (64 still uses local64_t).
563  *
564  * For the ring buffer, the 64 bit operations required for the time stamp
565  * are the following:
566  *
567  *  - Only need 59 bits (uses 60 to make it even).
568  *  - Reads may fail if they interrupt a modification of the time stamp.
569  *      A read will succeed if it did not interrupt another write, even if
570  *      the read itself is interrupted by a write.
571  *      It returns whether it was successful or not.
572  *
573  *  - Writes always succeed and will overwrite other writes and writes
574  *      that were done by events interrupting the current write.
575  *
576  *  - A write followed by a read of the same time stamp will always succeed,
577  *      but may not contain the same value.
578  *
579  *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
580  *      Other than that, it acts like a normal cmpxchg.
581  *
582  * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
583  *  (bottom being the least significant 30 bits of the 60 bit time stamp).
584  *
585  * The two most significant bits of each half holds a 2 bit counter (0-3).
586  * Each update will increment this counter by one.
587  * When reading the top and bottom, if the two counter bits match then the
588  *  top and bottom together make a valid 60 bit number.
589  */
590 #define RB_TIME_SHIFT   30
591 #define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
592
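/*
 * Worked example (illustrative only): storing the 60 bit value
 * 0x123456789 while the counter is at 2:
 *
 *	bottom = 0x123456789 & RB_TIME_VAL_MASK		= 0x23456789
 *	top    = (0x123456789 >> 30) & RB_TIME_VAL_MASK	= 0x4
 *
 *	t->bottom = (2 << 30) | 0x23456789
 *	t->top    = (2 << 30) | 0x4
 *
 * A reader only trusts the pair when both halves carry the same two
 * counter bits (see __rb_time_read() below).
 */
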
593 static inline int rb_time_cnt(unsigned long val)
594 {
595         return (val >> RB_TIME_SHIFT) & 3;
596 }
597
598 static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
599 {
600         u64 val;
601
602         val = top & RB_TIME_VAL_MASK;
603         val <<= RB_TIME_SHIFT;
604         val |= bottom & RB_TIME_VAL_MASK;
605
606         return val;
607 }
608
609 static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
610 {
611         unsigned long top, bottom;
612         unsigned long c;
613
614         /*
615          * If the read is interrupted by a write, then the cnt will
616          * be different. Loop until both top and bottom have been read
617          * without interruption.
618          */
619         do {
620                 c = local_read(&t->cnt);
621                 top = local_read(&t->top);
622                 bottom = local_read(&t->bottom);
623         } while (c != local_read(&t->cnt));
624
625         *cnt = rb_time_cnt(top);
626
627         /* If top and bottom counts don't match, this interrupted a write */
628         if (*cnt != rb_time_cnt(bottom))
629                 return false;
630
631         *ret = rb_time_val(top, bottom);
632         return true;
633 }
634
635 static bool rb_time_read(rb_time_t *t, u64 *ret)
636 {
637         unsigned long cnt;
638
639         return __rb_time_read(t, ret, &cnt);
640 }
641
642 static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
643 {
644         return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
645 }
646
647 static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom)
648 {
649         *top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
650         *bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
651 }
652
653 static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
654 {
655         val = rb_time_val_cnt(val, cnt);
656         local_set(t, val);
657 }
658
659 static void rb_time_set(rb_time_t *t, u64 val)
660 {
661         unsigned long cnt, top, bottom;
662
663         rb_time_split(val, &top, &bottom);
664
665         /* Writes always succeed with a valid number even if it gets interrupted. */
666         do {
667                 cnt = local_inc_return(&t->cnt);
668                 rb_time_val_set(&t->top, top, cnt);
669                 rb_time_val_set(&t->bottom, bottom, cnt);
670         } while (cnt != local_read(&t->cnt));
671 }
672
673 static inline bool
674 rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
675 {
676         unsigned long ret;
677
678         ret = local_cmpxchg(l, expect, set);
679         return ret == expect;
680 }
681
682 static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
683 {
684         unsigned long cnt, top, bottom;
685         unsigned long cnt2, top2, bottom2;
686         u64 val;
687
688         /* The cmpxchg always fails if it interrupted an update */
689         if (!__rb_time_read(t, &val, &cnt2))
690                 return false;
691
692         if (val != expect)
693                 return false;
694
695         cnt = local_read(&t->cnt);
696         if ((cnt & 3) != cnt2)
697                 return false;
698
699         cnt2 = cnt + 1;
700
701         rb_time_split(val, &top, &bottom);
702         top = rb_time_val_cnt(top, cnt);
703         bottom = rb_time_val_cnt(bottom, cnt);
704
705         rb_time_split(set, &top2, &bottom2);
706         top2 = rb_time_val_cnt(top2, cnt2);
707         bottom2 = rb_time_val_cnt(bottom2, cnt2);
708
709         if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
710                 return false;
711         if (!rb_time_read_cmpxchg(&t->top, top, top2))
712                 return false;
713         if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
714                 return false;
715         return true;
716 }
717
718 #else /* 64 bits */
719
720 /* local64_t always succeeds */
721
722 static inline bool rb_time_read(rb_time_t *t, u64 *ret)
723 {
724         *ret = local64_read(&t->time);
725         return true;
726 }
727 static void rb_time_set(rb_time_t *t, u64 val)
728 {
729         local64_set(&t->time, val);
730 }
731
732 static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
733 {
734         u64 val;
735         val = local64_cmpxchg(&t->time, expect, set);
736         return val == expect;
737 }
738 #endif
739
740 /**
741  * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
742  * @buffer: The ring_buffer to get the number of pages from
743  * @cpu: The cpu of the ring_buffer to get the number of pages from
744  *
745  * Returns the number of pages used by a per_cpu buffer of the ring buffer.
746  */
747 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
748 {
749         return buffer->buffers[cpu]->nr_pages;
750 }
751
752 /**
753  * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
754  * @buffer: The ring_buffer to get the number of pages from
755  * @cpu: The cpu of the ring_buffer to get the number of pages from
756  *
757  * Returns the number of pages that have content in the ring buffer.
758  */
759 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
760 {
761         size_t read;
762         size_t cnt;
763
764         read = local_read(&buffer->buffers[cpu]->pages_read);
765         cnt = local_read(&buffer->buffers[cpu]->pages_touched);
766         /* The reader can read an empty page, but not more than that */
767         if (cnt < read) {
768                 WARN_ON_ONCE(read > cnt + 1);
769                 return 0;
770         }
771
772         return cnt - read;
773 }
774
775 /*
776  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
777  *
778  * Schedules a delayed work to wake up any task that is blocked on the
779  * ring buffer waiters queue.
780  */
781 static void rb_wake_up_waiters(struct irq_work *work)
782 {
783         struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
784
785         wake_up_all(&rbwork->waiters);
786         if (rbwork->wakeup_full) {
787                 rbwork->wakeup_full = false;
788                 wake_up_all(&rbwork->full_waiters);
789         }
790 }
791
792 /**
793  * ring_buffer_wait - wait for input to the ring buffer
794  * @buffer: buffer to wait on
795  * @cpu: the cpu buffer to wait on
796  * @full: wait until at least @full percent of the pages contain data, if @cpu != RING_BUFFER_ALL_CPUS
797  *
798  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
799  * as data is added to any of the @buffer's cpu buffers. Otherwise
800  * it will wait for data to be added to a specific cpu buffer.
801  */
802 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
803 {
804         struct ring_buffer_per_cpu *cpu_buffer;
805         DEFINE_WAIT(wait);
806         struct rb_irq_work *work;
807         int ret = 0;
808
809         /*
810          * Depending on what the caller is waiting for, either any
811          * data in any cpu buffer, or a specific buffer, put the
812          * caller on the appropriate wait queue.
813          */
814         if (cpu == RING_BUFFER_ALL_CPUS) {
815                 work = &buffer->irq_work;
816                 /* Full only makes sense on per cpu reads */
817                 full = 0;
818         } else {
819                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
820                         return -ENODEV;
821                 cpu_buffer = buffer->buffers[cpu];
822                 work = &cpu_buffer->irq_work;
823         }
824
825
826         while (true) {
827                 if (full)
828                         prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
829                 else
830                         prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
831
832                 /*
833                  * The events can happen in critical sections where
834                  * checking a work queue can cause deadlocks.
835                  * After adding a task to the queue, this flag is set
836                  * only to notify events to try to wake up the queue
837                  * using irq_work.
838                  *
839                  * We don't clear it even if the buffer is no longer
840                  * empty. The flag only causes the next event to run
841                  * irq_work to do the work queue wake up. The worst
842                  * that can happen if we race with !trace_empty() is that
843                  * an event will cause an irq_work to try to wake up
844                  * an empty queue.
845                  *
846                  * There's no reason to protect this flag either, as
847                  * the work queue and irq_work logic will do the necessary
848                  * synchronization for the wake ups. The only thing
849                  * that is necessary is that the wake up happens after
850                  * a task has been queued. It's OK for spurious wake ups.
851                  */
852                 if (full)
853                         work->full_waiters_pending = true;
854                 else
855                         work->waiters_pending = true;
856
857                 if (signal_pending(current)) {
858                         ret = -EINTR;
859                         break;
860                 }
861
862                 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
863                         break;
864
865                 if (cpu != RING_BUFFER_ALL_CPUS &&
866                     !ring_buffer_empty_cpu(buffer, cpu)) {
867                         unsigned long flags;
868                         bool pagebusy;
869                         size_t nr_pages;
870                         size_t dirty;
871
872                         if (!full)
873                                 break;
874
875                         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
876                         pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
877                         nr_pages = cpu_buffer->nr_pages;
878                         dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
879                         if (!cpu_buffer->shortest_full ||
880                             cpu_buffer->shortest_full < full)
881                                 cpu_buffer->shortest_full = full;
882                         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
883                         if (!pagebusy &&
884                             (!nr_pages || (dirty * 100) > full * nr_pages))
885                                 break;
886                 }
887
888                 schedule();
889         }
890
891         if (full)
892                 finish_wait(&work->full_waiters, &wait);
893         else
894                 finish_wait(&work->waiters, &wait);
895
896         return ret;
897 }
898
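/*
 * Worked example of the @full watermark above (illustrative only):
 * @full is a percentage of the per cpu buffer. With nr_pages == 8 and
 * full == 50, the waiter keeps sleeping until (dirty * 100) > 50 * 8,
 * i.e. until at least 5 pages contain unread data (and the reader page
 * is not also the commit page).
 */
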
899 /**
900  * ring_buffer_poll_wait - poll on buffer input
901  * @buffer: buffer to wait on
902  * @cpu: the cpu buffer to wait on
903  * @filp: the file descriptor
904  * @poll_table: The poll descriptor
905  *
906  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
907  * as data is added to any of the @buffer's cpu buffers. Otherwise
908  * it will wait for data to be added to a specific cpu buffer.
909  *
910  * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
911  * zero otherwise.
912  */
913 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
914                           struct file *filp, poll_table *poll_table)
915 {
916         struct ring_buffer_per_cpu *cpu_buffer;
917         struct rb_irq_work *work;
918
919         if (cpu == RING_BUFFER_ALL_CPUS)
920                 work = &buffer->irq_work;
921         else {
922                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
923                         return -EINVAL;
924
925                 cpu_buffer = buffer->buffers[cpu];
926                 work = &cpu_buffer->irq_work;
927         }
928
929         poll_wait(filp, &work->waiters, poll_table);
930         work->waiters_pending = true;
931         /*
932          * There's a tight race between setting the waiters_pending and
933          * checking if the ring buffer is empty.  Once the waiters_pending bit
934          * is set, the next event will wake the task up, but we can get stuck
935          * if there's only a single event in.
936          *
937          * FIXME: Ideally, we need a memory barrier on the writer side as well,
938          * but adding a memory barrier to all events will cause too much of a
939          * performance hit in the fast path.  We only need a memory barrier when
940          * the buffer goes from empty to having content.  But as this race is
941          * extremely small, and it's not a problem if another event comes in, we
942          * will fix it later.
943          */
944         smp_mb();
945
946         if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
947             (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
948                 return EPOLLIN | EPOLLRDNORM;
949         return 0;
950 }
951
952 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
953 #define RB_WARN_ON(b, cond)                                             \
954         ({                                                              \
955                 int _____ret = unlikely(cond);                          \
956                 if (_____ret) {                                         \
957                         if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
958                                 struct ring_buffer_per_cpu *__b =       \
959                                         (void *)b;                      \
960                                 atomic_inc(&__b->buffer->record_disabled); \
961                         } else                                          \
962                                 atomic_inc(&b->record_disabled);        \
963                         WARN_ON(1);                                     \
964                 }                                                       \
965                 _____ret;                                               \
966         })
967
968 /* Up this if you want to test the TIME_EXTENTS and normalization */
969 #define DEBUG_SHIFT 0
970
971 static inline u64 rb_time_stamp(struct trace_buffer *buffer)
972 {
973         u64 ts;
974
975         /* Skip retpolines :-( */
976         if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
977                 ts = trace_clock_local();
978         else
979                 ts = buffer->clock();
980
981         /* shift to debug/test normalization and TIME_EXTENTS */
982         return ts << DEBUG_SHIFT;
983 }
984
985 u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu)
986 {
987         u64 time;
988
989         preempt_disable_notrace();
990         time = rb_time_stamp(buffer);
991         preempt_enable_notrace();
992
993         return time;
994 }
995 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
996
997 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
998                                       int cpu, u64 *ts)
999 {
1000         /* Just stupid testing the normalize function and deltas */
1001         *ts >>= DEBUG_SHIFT;
1002 }
1003 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1004
1005 /*
1006  * Making the ring buffer lockless makes things tricky.
1007  * Although writes only happen on the CPU that they are on,
1008  * and they only need to worry about interrupts. Reads can
1009  * happen on any CPU.
1010  *
1011  * The reader page is always off the ring buffer, but when the
1012  * reader finishes with a page, it needs to swap its page with
1013  * a new one from the buffer. The reader needs to take from
1014  * the head (writes go to the tail). But if a writer is in overwrite
1015  * mode and wraps, it must push the head page forward.
1016  *
1017  * Here lies the problem.
1018  *
1019  * The reader must be careful to replace only the head page, and
1020  * not another one. As described at the top of the file in the
1021  * ASCII art, the reader sets its old page to point to the next
1022  * page after head. It then sets the page after head to point to
1023  * the old reader page. But if the writer moves the head page
1024  * during this operation, the reader could end up with the tail.
1025  *
1026  * We use cmpxchg to help prevent this race. We also do something
1027  * special with the page before head. We set the LSB to 1.
1028  *
1029  * When the writer must push the page forward, it will clear the
1030  * bit that points to the head page, move the head, and then set
1031  * the bit that points to the new head page.
1032  *
1033  * We also don't want an interrupt coming in and moving the head
1034  * page on another writer. Thus we use the second LSB to catch
1035  * that too. Thus:
1036  *
1037  * head->list->prev->next        bit 1          bit 0
1038  *                              -------        -------
1039  * Normal page                     0              0
1040  * Points to head page             0              1
1041  * New head page                   1              0
1042  *
1043  * Note we can not trust the prev pointer of the head page, because:
1044  *
1045  * +----+       +-----+        +-----+
1046  * |    |------>|  T  |---X--->|  N  |
1047  * |    |<------|     |        |     |
1048  * +----+       +-----+        +-----+
1049  *   ^                           ^ |
1050  *   |          +-----+          | |
1051  *   +----------|  R  |----------+ |
1052  *              |     |<-----------+
1053  *              +-----+
1054  *
1055  * Key:  ---X-->  HEAD flag set in pointer
1056  *         T      Tail page
1057  *         R      Reader page
1058  *         N      Next page
1059  *
1060  * (see __rb_reserve_next() to see where this happens)
1061  *
1062  *  What the above shows is that the reader just swapped out
1063  *  the reader page with a page in the buffer, but before it
1064  *  could make the new header point back to the new page added
1065  *  it was preempted by a writer. The writer moved forward onto
1066  *  the new page added by the reader and is about to move forward
1067  *  again.
1068  *
1069  *  You can see, it is legitimate for the previous pointer of
1070  *  the head (or any page) not to point back to itself. But only
1071  *  temporarily.
1072  */
1073
1074 #define RB_PAGE_NORMAL          0UL
1075 #define RB_PAGE_HEAD            1UL
1076 #define RB_PAGE_UPDATE          2UL
1077
1078
1079 #define RB_FLAG_MASK            3UL
1080
1081 /* PAGE_MOVED is not part of the mask */
1082 #define RB_PAGE_MOVED           4UL
1083
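/*
 * Worked example (illustrative only): buffer pages are cache line
 * aligned and the list_head is their first member, so if the head page
 * sits at 0xffff888012345680, the previous page's ->next reads
 * 0xffff888012345681 (RB_PAGE_HEAD) in the normal case, or
 * 0xffff888012345682 (RB_PAGE_UPDATE) while a writer is moving the
 * head. rb_list_head() below recovers the real pointer:
 *
 *	(struct list_head *)(val & ~RB_FLAG_MASK)
 */
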
1084 /*
1085  * rb_list_head - remove any bit
1086  */
1087 static struct list_head *rb_list_head(struct list_head *list)
1088 {
1089         unsigned long val = (unsigned long)list;
1090
1091         return (struct list_head *)(val & ~RB_FLAG_MASK);
1092 }
1093
1094 /*
1095  * rb_is_head_page - test if the given page is the head page
1096  *
1097  * Because the reader may move the head_page pointer, we can
1098  * not trust what the head page is (it may be pointing to
1099  * the reader page). But if the next page is a header page,
1100  * its flags will be non zero.
1101  */
1102 static inline int
1103 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1104                 struct buffer_page *page, struct list_head *list)
1105 {
1106         unsigned long val;
1107
1108         val = (unsigned long)list->next;
1109
1110         if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1111                 return RB_PAGE_MOVED;
1112
1113         return val & RB_FLAG_MASK;
1114 }
1115
1116 /*
1117  * rb_is_reader_page
1118  *
1119  * The unique thing about the reader page is that, if the
1120  * writer is ever on it, the previous pointer never points
1121  * back to the reader page.
1122  */
1123 static bool rb_is_reader_page(struct buffer_page *page)
1124 {
1125         struct list_head *list = page->list.prev;
1126
1127         return rb_list_head(list->next) != &page->list;
1128 }
1129
1130 /*
1131  * rb_set_list_to_head - set a list_head to be pointing to head.
1132  */
1133 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
1134                                 struct list_head *list)
1135 {
1136         unsigned long *ptr;
1137
1138         ptr = (unsigned long *)&list->next;
1139         *ptr |= RB_PAGE_HEAD;
1140         *ptr &= ~RB_PAGE_UPDATE;
1141 }
1142
1143 /*
1144  * rb_head_page_activate - sets up head page
1145  */
1146 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1147 {
1148         struct buffer_page *head;
1149
1150         head = cpu_buffer->head_page;
1151         if (!head)
1152                 return;
1153
1154         /*
1155          * Set the previous list pointer to have the HEAD flag.
1156          */
1157         rb_set_list_to_head(cpu_buffer, head->list.prev);
1158 }
1159
1160 static void rb_list_head_clear(struct list_head *list)
1161 {
1162         unsigned long *ptr = (unsigned long *)&list->next;
1163
1164         *ptr &= ~RB_FLAG_MASK;
1165 }
1166
1167 /*
1168  * rb_head_page_deactivate - clears head page ptr (for free list)
1169  */
1170 static void
1171 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1172 {
1173         struct list_head *hd;
1174
1175         /* Go through the whole list and clear any pointers found. */
1176         rb_list_head_clear(cpu_buffer->pages);
1177
1178         list_for_each(hd, cpu_buffer->pages)
1179                 rb_list_head_clear(hd);
1180 }
1181
1182 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1183                             struct buffer_page *head,
1184                             struct buffer_page *prev,
1185                             int old_flag, int new_flag)
1186 {
1187         struct list_head *list;
1188         unsigned long val = (unsigned long)&head->list;
1189         unsigned long ret;
1190
1191         list = &prev->list;
1192
1193         val &= ~RB_FLAG_MASK;
1194
1195         ret = cmpxchg((unsigned long *)&list->next,
1196                       val | old_flag, val | new_flag);
1197
1198         /* check if the reader took the page */
1199         if ((ret & ~RB_FLAG_MASK) != val)
1200                 return RB_PAGE_MOVED;
1201
1202         return ret & RB_FLAG_MASK;
1203 }
1204
1205 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1206                                    struct buffer_page *head,
1207                                    struct buffer_page *prev,
1208                                    int old_flag)
1209 {
1210         return rb_head_page_set(cpu_buffer, head, prev,
1211                                 old_flag, RB_PAGE_UPDATE);
1212 }
1213
1214 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1215                                  struct buffer_page *head,
1216                                  struct buffer_page *prev,
1217                                  int old_flag)
1218 {
1219         return rb_head_page_set(cpu_buffer, head, prev,
1220                                 old_flag, RB_PAGE_HEAD);
1221 }
1222
1223 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1224                                    struct buffer_page *head,
1225                                    struct buffer_page *prev,
1226                                    int old_flag)
1227 {
1228         return rb_head_page_set(cpu_buffer, head, prev,
1229                                 old_flag, RB_PAGE_NORMAL);
1230 }
1231
1232 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
1233                                struct buffer_page **bpage)
1234 {
1235         struct list_head *p = rb_list_head((*bpage)->list.next);
1236
1237         *bpage = list_entry(p, struct buffer_page, list);
1238 }
1239
1240 static struct buffer_page *
1241 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1242 {
1243         struct buffer_page *head;
1244         struct buffer_page *page;
1245         struct list_head *list;
1246         int i;
1247
1248         if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1249                 return NULL;
1250
1251         /* sanity check */
1252         list = cpu_buffer->pages;
1253         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1254                 return NULL;
1255
1256         page = head = cpu_buffer->head_page;
1257         /*
1258          * It is possible that the writer moves the header behind
1259          * where we started, and we miss in one loop.
1260          * A second loop should grab the header, but we'll do
1261          * three loops just because I'm paranoid.
1262          */
1263         for (i = 0; i < 3; i++) {
1264                 do {
1265                         if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
1266                                 cpu_buffer->head_page = page;
1267                                 return page;
1268                         }
1269                         rb_inc_page(cpu_buffer, &page);
1270                 } while (page != head);
1271         }
1272
1273         RB_WARN_ON(cpu_buffer, 1);
1274
1275         return NULL;
1276 }
1277
1278 static int rb_head_page_replace(struct buffer_page *old,
1279                                 struct buffer_page *new)
1280 {
1281         unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1282         unsigned long val;
1283         unsigned long ret;
1284
1285         val = *ptr & ~RB_FLAG_MASK;
1286         val |= RB_PAGE_HEAD;
1287
1288         ret = cmpxchg(ptr, val, (unsigned long)&new->list);
1289
1290         return ret == val;
1291 }
1292
1293 /*
1294  * rb_tail_page_update - move the tail page forward
1295  */
1296 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1297                                struct buffer_page *tail_page,
1298                                struct buffer_page *next_page)
1299 {
1300         unsigned long old_entries;
1301         unsigned long old_write;
1302
1303         /*
1304          * The tail page now needs to be moved forward.
1305          *
1306          * We need to reset the tail page, but without messing
1307          * with possible erasing of data brought in by interrupts
1308          * that have moved the tail page and are currently on it.
1309          *
1310          * We add a counter to the write field to denote this.
1311          */
1312         old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1313         old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1314
1315         local_inc(&cpu_buffer->pages_touched);
1316         /*
1317          * Just make sure we have seen our old_write and synchronize
1318          * with any interrupts that come in.
1319          */
1320         barrier();
1321
1322         /*
1323          * If the tail page is still the same as what we think
1324          * it is, then it is up to us to update the tail
1325          * pointer.
1326          */
1327         if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1328                 /* Zero the write counter */
1329                 unsigned long val = old_write & ~RB_WRITE_MASK;
1330                 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1331
1332                 /*
1333                  * This will only succeed if an interrupt did
1334                  * not come in and change it. In which case, we
1335                  * do not want to modify it.
1336                  *
1337                  * We add (void) to let the compiler know that we do not care
1338                  * about the return value of these functions. We use the
1339                  * cmpxchg to only update if an interrupt did not already
1340                  * do it for us. If the cmpxchg fails, we don't care.
1341                  */
1342                 (void)local_cmpxchg(&next_page->write, old_write, val);
1343                 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1344
1345                 /*
1346                  * No need to worry about races with clearing out the commit.
1347                  * It can only increment when a commit takes place. But that
1348                  * only happens in the outermost nested commit.
1349                  */
1350                 local_set(&next_page->page->commit, 0);
1351
1352                 /* Again, either we update tail_page or an interrupt does */
1353                 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1354         }
1355 }
1356
1357 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1358                           struct buffer_page *bpage)
1359 {
1360         unsigned long val = (unsigned long)bpage;
1361
1362         if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1363                 return 1;
1364
1365         return 0;
1366 }
1367
1368 /**
1369  * rb_check_list - make sure a pointer to a list has the last bits zero
1370  */
1371 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1372                          struct list_head *list)
1373 {
1374         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1375                 return 1;
1376         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1377                 return 1;
1378         return 0;
1379 }
1380
1381 /**
1382  * rb_check_pages - integrity check of buffer pages
1383  * @cpu_buffer: CPU buffer with pages to test
1384  *
1385  * As a safety measure we check to make sure the data pages have not
1386  * been corrupted.
1387  */
1388 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1389 {
1390         struct list_head *head = cpu_buffer->pages;
1391         struct buffer_page *bpage, *tmp;
1392
1393         /* Reset the head page if it exists */
1394         if (cpu_buffer->head_page)
1395                 rb_set_head_page(cpu_buffer);
1396
1397         rb_head_page_deactivate(cpu_buffer);
1398
1399         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1400                 return -1;
1401         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1402                 return -1;
1403
1404         if (rb_check_list(cpu_buffer, head))
1405                 return -1;
1406
1407         list_for_each_entry_safe(bpage, tmp, head, list) {
1408                 if (RB_WARN_ON(cpu_buffer,
1409                                bpage->list.next->prev != &bpage->list))
1410                         return -1;
1411                 if (RB_WARN_ON(cpu_buffer,
1412                                bpage->list.prev->next != &bpage->list))
1413                         return -1;
1414                 if (rb_check_list(cpu_buffer, &bpage->list))
1415                         return -1;
1416         }
1417
1418         rb_head_page_activate(cpu_buffer);
1419
1420         return 0;
1421 }
1422
1423 static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
1424 {
1425         struct buffer_page *bpage, *tmp;
1426         bool user_thread = current->mm != NULL;
1427         gfp_t mflags;
1428         long i;
1429
1430         /*
1431          * Check if the available memory is there first.
1432          * Note, si_mem_available() only gives us a rough estimate of available
1433          * memory. It may not be accurate. But we don't care, we just want
1434          * to prevent doing any allocation when it is obvious that it is
1435          * not going to succeed.
1436          */
1437         i = si_mem_available();
1438         if (i < nr_pages)
1439                 return -ENOMEM;
1440
1441         /*
1442          * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1443          * gracefully without invoking oom-killer and the system is not
1444          * destabilized.
1445          */
1446         mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1447
1448         /*
1449          * If a user thread allocates too much, and si_mem_available()
1450          * reports there's enough memory even though there is not,
1451          * make sure the OOM killer kills this thread. This can happen
1452          * even with RETRY_MAYFAIL because another task may be doing
1453          * an allocation after this task has taken all memory.
1454          * This is the task the OOM killer needs to take out during this
1455          * loop, even if it was triggered by an allocation somewhere else.
1456          */
1457         if (user_thread)
1458                 set_current_oom_origin();
1459         for (i = 0; i < nr_pages; i++) {
1460                 struct page *page;
1461
1462                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1463                                     mflags, cpu_to_node(cpu));
1464                 if (!bpage)
1465                         goto free_pages;
1466
1467                 list_add(&bpage->list, pages);
1468
1469                 page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
1470                 if (!page)
1471                         goto free_pages;
1472                 bpage->page = page_address(page);
1473                 rb_init_page(bpage->page);
1474
1475                 if (user_thread && fatal_signal_pending(current))
1476                         goto free_pages;
1477         }
1478         if (user_thread)
1479                 clear_current_oom_origin();
1480
1481         return 0;
1482
1483 free_pages:
1484         list_for_each_entry_safe(bpage, tmp, pages, list) {
1485                 list_del_init(&bpage->list);
1486                 free_buffer_page(bpage);
1487         }
1488         if (user_thread)
1489                 clear_current_oom_origin();
1490
1491         return -ENOMEM;
1492 }
1493
1494 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1495                              unsigned long nr_pages)
1496 {
1497         LIST_HEAD(pages);
1498
1499         WARN_ON(!nr_pages);
1500
1501         if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1502                 return -ENOMEM;
1503
1504         /*
1505          * The ring buffer page list is a circular list that does not
1506          * start and end with a list head. All page list items point to
1507          * other pages.
1508          */
1509         cpu_buffer->pages = pages.next;
1510         list_del(&pages);
1511
1512         cpu_buffer->nr_pages = nr_pages;
1513
1514         rb_check_pages(cpu_buffer);
1515
1516         return 0;
1517 }
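/*
 * Illustration (sketch only): after the list_del(&pages) above, the
 * buffer pages form a plain circular list with no list_head in it,
 * and cpu_buffer->pages simply points at one of the buffer_page
 * entries:
 *
 *	cpu_buffer->pages --> [page] <--> [page] <--> [page] <--+
 *	                         ^                               |
 *	                         +-------------------------------+
 */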
1518
1519 static struct ring_buffer_per_cpu *
1520 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1521 {
1522         struct ring_buffer_per_cpu *cpu_buffer;
1523         struct buffer_page *bpage;
1524         struct page *page;
1525         int ret;
1526
1527         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1528                                   GFP_KERNEL, cpu_to_node(cpu));
1529         if (!cpu_buffer)
1530                 return NULL;
1531
1532         cpu_buffer->cpu = cpu;
1533         cpu_buffer->buffer = buffer;
1534         raw_spin_lock_init(&cpu_buffer->reader_lock);
1535         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1536         cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1537         INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1538         init_completion(&cpu_buffer->update_done);
1539         init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1540         init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1541         init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1542
1543         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1544                             GFP_KERNEL, cpu_to_node(cpu));
1545         if (!bpage)
1546                 goto fail_free_buffer;
1547
1548         rb_check_bpage(cpu_buffer, bpage);
1549
1550         cpu_buffer->reader_page = bpage;
1551         page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1552         if (!page)
1553                 goto fail_free_reader;
1554         bpage->page = page_address(page);
1555         rb_init_page(bpage->page);
1556
1557         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1558         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1559
1560         ret = rb_allocate_pages(cpu_buffer, nr_pages);
1561         if (ret < 0)
1562                 goto fail_free_reader;
1563
1564         cpu_buffer->head_page
1565                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1566         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1567
1568         rb_head_page_activate(cpu_buffer);
1569
1570         return cpu_buffer;
1571
1572  fail_free_reader:
1573         free_buffer_page(cpu_buffer->reader_page);
1574
1575  fail_free_buffer:
1576         kfree(cpu_buffer);
1577         return NULL;
1578 }
1579
1580 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1581 {
1582         struct list_head *head = cpu_buffer->pages;
1583         struct buffer_page *bpage, *tmp;
1584
1585         free_buffer_page(cpu_buffer->reader_page);
1586
1587         rb_head_page_deactivate(cpu_buffer);
1588
1589         if (head) {
1590                 list_for_each_entry_safe(bpage, tmp, head, list) {
1591                         list_del_init(&bpage->list);
1592                         free_buffer_page(bpage);
1593                 }
1594                 bpage = list_entry(head, struct buffer_page, list);
1595                 free_buffer_page(bpage);
1596         }
1597
1598         kfree(cpu_buffer);
1599 }
1600
1601 /**
1602  * __ring_buffer_alloc - allocate a new ring_buffer
1603  * @size: the size in bytes per cpu that is needed.
1604  * @flags: attributes to set for the ring buffer.
1605  * @key: ring buffer reader_lock_key.
1606  *
1607  * Currently the only flag that is available is the RB_FL_OVERWRITE
1608  * flag. This flag means that the buffer will overwrite old data
1609  * when the buffer wraps. If this flag is not set, the buffer will
1610  * drop data when the tail hits the head.
1611  */
1612 struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1613                                         struct lock_class_key *key)
1614 {
1615         struct trace_buffer *buffer;
1616         long nr_pages;
1617         int bsize;
1618         int cpu;
1619         int ret;
1620
1621         /* keep it in its own cache line */
1622         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1623                          GFP_KERNEL);
1624         if (!buffer)
1625                 return NULL;
1626
1627         if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1628                 goto fail_free_buffer;
1629
1630         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1631         buffer->flags = flags;
1632         buffer->clock = trace_clock_local;
1633         buffer->reader_lock_key = key;
1634
1635         init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1636         init_waitqueue_head(&buffer->irq_work.waiters);
1637
1638         /* need at least two pages */
1639         if (nr_pages < 2)
1640                 nr_pages = 2;
1641
1642         buffer->cpus = nr_cpu_ids;
1643
1644         bsize = sizeof(void *) * nr_cpu_ids;
1645         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1646                                   GFP_KERNEL);
1647         if (!buffer->buffers)
1648                 goto fail_free_cpumask;
1649
1650         cpu = raw_smp_processor_id();
1651         cpumask_set_cpu(cpu, buffer->cpumask);
1652         buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1653         if (!buffer->buffers[cpu])
1654                 goto fail_free_buffers;
1655
1656         ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1657         if (ret < 0)
1658                 goto fail_free_buffers;
1659
1660         mutex_init(&buffer->mutex);
1661
1662         return buffer;
1663
1664  fail_free_buffers:
1665         for_each_buffer_cpu(buffer, cpu) {
1666                 if (buffer->buffers[cpu])
1667                         rb_free_cpu_buffer(buffer->buffers[cpu]);
1668         }
1669         kfree(buffer->buffers);
1670
1671  fail_free_cpumask:
1672         free_cpumask_var(buffer->cpumask);
1673
1674  fail_free_buffer:
1675         kfree(buffer);
1676         return NULL;
1677 }
1678 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1679
1680 /**
1681  * ring_buffer_free - free a ring buffer.
1682  * @buffer: the buffer to free.
1683  */
1684 void
1685 ring_buffer_free(struct trace_buffer *buffer)
1686 {
1687         int cpu;
1688
1689         cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1690
1691         for_each_buffer_cpu(buffer, cpu)
1692                 rb_free_cpu_buffer(buffer->buffers[cpu]);
1693
1694         kfree(buffer->buffers);
1695         free_cpumask_var(buffer->cpumask);
1696
1697         kfree(buffer);
1698 }
1699 EXPORT_SYMBOL_GPL(ring_buffer_free);
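/*
 * Usage sketch (illustrative only, not part of this file): callers
 * normally allocate through the ring_buffer_alloc() wrapper declared
 * in <linux/ring_buffer.h>, which supplies a static lock_class_key
 * for lockdep, and pair it with ring_buffer_free():
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 */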
1700
1701 void ring_buffer_set_clock(struct trace_buffer *buffer,
1702                            u64 (*clock)(void))
1703 {
1704         buffer->clock = clock;
1705 }
1706
1707 void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
1708 {
1709         buffer->time_stamp_abs = abs;
1710 }
1711
1712 bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
1713 {
1714         return buffer->time_stamp_abs;
1715 }
1716
1717 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1718
1719 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1720 {
1721         return local_read(&bpage->entries) & RB_WRITE_MASK;
1722 }
1723
1724 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1725 {
1726         return local_read(&bpage->write) & RB_WRITE_MASK;
1727 }
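/*
 * Note (descriptive only): the 'write' and 'entries' fields carry
 * more than a plain count. The bits above RB_WRITE_MASK are used as
 * an update counter (see rb_tail_page_update()), so the helpers above
 * mask them off to return just the count.
 */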
1728
1729 static int
1730 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1731 {
1732         struct list_head *tail_page, *to_remove, *next_page;
1733         struct buffer_page *to_remove_page, *tmp_iter_page;
1734         struct buffer_page *last_page, *first_page;
1735         unsigned long nr_removed;
1736         unsigned long head_bit;
1737         int page_entries;
1738
1739         head_bit = 0;
1740
1741         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1742         atomic_inc(&cpu_buffer->record_disabled);
1743         /*
1744          * We don't race with the readers since we have acquired the reader
1745          * lock. We also don't race with writers after disabling recording.
1746          * This makes it easy to figure out the first and the last page to be
1747          * removed from the list. We unlink all the pages in between including
1748          * the first and last pages. This is done in a busy loop so that we
1749          * lose the least number of traces.
1750          * The pages are freed after we restart recording and unlock readers.
1751          */
1752         tail_page = &cpu_buffer->tail_page->list;
1753
1754         /*
1755          * The tail page might be on the reader page; in that case we
1756          * remove the next page from the ring buffer instead.
1757          */
1758         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1759                 tail_page = rb_list_head(tail_page->next);
1760         to_remove = tail_page;
1761
1762         /* start of pages to remove */
1763         first_page = list_entry(rb_list_head(to_remove->next),
1764                                 struct buffer_page, list);
1765
1766         for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1767                 to_remove = rb_list_head(to_remove)->next;
1768                 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1769         }
1770
1771         next_page = rb_list_head(to_remove)->next;
1772
1773         /*
1774          * Now we remove all pages between tail_page and next_page.
1775          * Make sure that we have head_bit value preserved for the
1776          * next page
1777          */
1778         tail_page->next = (struct list_head *)((unsigned long)next_page |
1779                                                 head_bit);
1780         next_page = rb_list_head(next_page);
1781         next_page->prev = tail_page;
1782
1783         /* make sure pages points to a valid page in the ring buffer */
1784         cpu_buffer->pages = next_page;
1785
1786         /* update head page */
1787         if (head_bit)
1788                 cpu_buffer->head_page = list_entry(next_page,
1789                                                 struct buffer_page, list);
1790
1791         /*
1792          * change read pointer to make sure any read iterators reset
1793          * themselves
1794          */
1795         cpu_buffer->read = 0;
1796
1797         /* pages are removed, resume tracing and then free the pages */
1798         atomic_dec(&cpu_buffer->record_disabled);
1799         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1800
1801         RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1802
1803         /* last buffer page to remove */
1804         last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1805                                 list);
1806         tmp_iter_page = first_page;
1807
1808         do {
1809                 cond_resched();
1810
1811                 to_remove_page = tmp_iter_page;
1812                 rb_inc_page(cpu_buffer, &tmp_iter_page);
1813
1814                 /* update the counters */
1815                 page_entries = rb_page_entries(to_remove_page);
1816                 if (page_entries) {
1817                         /*
1818                          * If something was added to this page, it was full
1819                          * since it is not the tail page. So we deduct the
1820                          * bytes it consumed from the ring buffer's byte count.
1821                          * Increment overrun to account for the lost events.
1822                          */
1823                         local_add(page_entries, &cpu_buffer->overrun);
1824                         local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1825                 }
1826
1827                 /*
1828                  * We have already removed references to this list item, just
1829                  * free up the buffer_page and its page
1830                  */
1831                 free_buffer_page(to_remove_page);
1832                 nr_removed--;
1833
1834         } while (to_remove_page != last_page);
1835
1836         RB_WARN_ON(cpu_buffer, nr_removed);
1837
1838         return nr_removed == 0;
1839 }
1840
1841 static int
1842 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1843 {
1844         struct list_head *pages = &cpu_buffer->new_pages;
1845         int retries, success;
1846
1847         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1848         /*
1849          * We are holding the reader lock, so the reader page won't be swapped
1850          * in the ring buffer. Now we are racing with the writer trying to
1851          * move head page and the tail page.
1852          * We are going to adapt the reader page update process where:
1853          * 1. We first splice the start and end of list of new pages between
1854          *    the head page and its previous page.
1855          * 2. We cmpxchg the prev_page->next to point from head page to the
1856          *    start of new pages list.
1857          * 3. Finally, we update the head->prev to the end of new list.
1858          *
1859          * We will try this process 10 times, to make sure that we don't keep
1860          * spinning.
1861          */
1862         retries = 10;
1863         success = 0;
1864         while (retries--) {
1865                 struct list_head *head_page, *prev_page, *r;
1866                 struct list_head *last_page, *first_page;
1867                 struct list_head *head_page_with_bit;
1868
1869                 head_page = &rb_set_head_page(cpu_buffer)->list;
1870                 if (!head_page)
1871                         break;
1872                 prev_page = head_page->prev;
1873
1874                 first_page = pages->next;
1875                 last_page  = pages->prev;
1876
1877                 head_page_with_bit = (struct list_head *)
1878                                      ((unsigned long)head_page | RB_PAGE_HEAD);
1879
1880                 last_page->next = head_page_with_bit;
1881                 first_page->prev = prev_page;
1882
1883                 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1884
1885                 if (r == head_page_with_bit) {
1886                         /*
1887                          * yay, we replaced the page pointer to our new list,
1888                          * now we just have to update the head page's prev
1889                          * pointer to point to the end of the list.
1890                          */
1891                         head_page->prev = last_page;
1892                         success = 1;
1893                         break;
1894                 }
1895         }
1896
1897         if (success)
1898                 INIT_LIST_HEAD(pages);
1899         /*
1900          * If we weren't successful in adding in new pages, warn and stop
1901          * tracing
1902          */
1903         RB_WARN_ON(cpu_buffer, !success);
1904         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1905
1906         /* free pages if they weren't inserted */
1907         if (!success) {
1908                 struct buffer_page *bpage, *tmp;
1909                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1910                                          list) {
1911                         list_del_init(&bpage->list);
1912                         free_buffer_page(bpage);
1913                 }
1914         }
1915         return success;
1916 }
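/*
 * Illustration (sketch only) of the splice performed above, where 'H'
 * marks the RB_PAGE_HEAD flag carried in the low bits of a ->next
 * pointer:
 *
 *	before:  prev --H--> head --> ...
 *	after:   prev -----> first_new --> ... --> last_new --H--> head --> ...
 *
 * The cmpxchg on prev_page->next only succeeds while the flag is
 * still in place, so a writer concurrently moving the head page makes
 * us retry.
 */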
1917
1918 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1919 {
1920         int success;
1921
1922         if (cpu_buffer->nr_pages_to_update > 0)
1923                 success = rb_insert_pages(cpu_buffer);
1924         else
1925                 success = rb_remove_pages(cpu_buffer,
1926                                         -cpu_buffer->nr_pages_to_update);
1927
1928         if (success)
1929                 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1930 }
1931
1932 static void update_pages_handler(struct work_struct *work)
1933 {
1934         struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1935                         struct ring_buffer_per_cpu, update_pages_work);
1936         rb_update_pages(cpu_buffer);
1937         complete(&cpu_buffer->update_done);
1938 }
1939
1940 /**
1941  * ring_buffer_resize - resize the ring buffer
1942  * @buffer: the buffer to resize.
1943  * @size: the new size.
1944  * @cpu_id: the cpu buffer to resize
1945  *
1946  * Minimum size is 2 * BUF_PAGE_SIZE.
1947  *
1948  * Returns the adjusted (page-aligned) size on success and < 0 on failure.
1949  */
1950 int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
1951                         int cpu_id)
1952 {
1953         struct ring_buffer_per_cpu *cpu_buffer;
1954         unsigned long nr_pages;
1955         int cpu, err = 0;
1956
1957         /*
1958          * Always succeed at resizing a non-existent buffer:
1959          */
1960         if (!buffer)
1961                 return size;
1962
1963         /* Make sure the requested buffer exists */
1964         if (cpu_id != RING_BUFFER_ALL_CPUS &&
1965             !cpumask_test_cpu(cpu_id, buffer->cpumask))
1966                 return size;
1967
1968         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1969
1970         /* we need a minimum of two pages */
1971         if (nr_pages < 2)
1972                 nr_pages = 2;
1973
1974         size = nr_pages * BUF_PAGE_SIZE;
1975
1976         /* prevent another thread from changing buffer sizes */
1977         mutex_lock(&buffer->mutex);
1978
1979
1980         if (cpu_id == RING_BUFFER_ALL_CPUS) {
1981                 /*
1982                  * Don't succeed if resizing is disabled, as a reader might be
1983                  * manipulating the ring buffer and is expecting a sane state while
1984                  * this is true.
1985                  */
1986                 for_each_buffer_cpu(buffer, cpu) {
1987                         cpu_buffer = buffer->buffers[cpu];
1988                         if (atomic_read(&cpu_buffer->resize_disabled)) {
1989                                 err = -EBUSY;
1990                                 goto out_err_unlock;
1991                         }
1992                 }
1993
1994                 /* calculate the pages to update */
1995                 for_each_buffer_cpu(buffer, cpu) {
1996                         cpu_buffer = buffer->buffers[cpu];
1997
1998                         cpu_buffer->nr_pages_to_update = nr_pages -
1999                                                         cpu_buffer->nr_pages;
2000                         /*
2001                          * nothing more to do for removing pages or no update
2002                          */
2003                         if (cpu_buffer->nr_pages_to_update <= 0)
2004                                 continue;
2005                         /*
2006                          * to add pages, make sure all new pages can be
2007                          * allocated without receiving ENOMEM
2008                          */
2009                         INIT_LIST_HEAD(&cpu_buffer->new_pages);
2010                         if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
2011                                                 &cpu_buffer->new_pages, cpu)) {
2012                                 /* not enough memory for new pages */
2013                                 err = -ENOMEM;
2014                                 goto out_err;
2015                         }
2016                 }
2017
2018                 get_online_cpus();
2019                 /*
2020                  * Fire off all the required work handlers.
2021                  * We can't schedule on offline CPUs, but it's not necessary
2022                  * since we can change their buffer sizes without any race.
2023                  */
2024                 for_each_buffer_cpu(buffer, cpu) {
2025                         cpu_buffer = buffer->buffers[cpu];
2026                         if (!cpu_buffer->nr_pages_to_update)
2027                                 continue;
2028
2029                         /* Can't run something on an offline CPU. */
2030                         if (!cpu_online(cpu)) {
2031                                 rb_update_pages(cpu_buffer);
2032                                 cpu_buffer->nr_pages_to_update = 0;
2033                         } else {
2034                                 schedule_work_on(cpu,
2035                                                 &cpu_buffer->update_pages_work);
2036                         }
2037                 }
2038
2039                 /* wait for all the updates to complete */
2040                 for_each_buffer_cpu(buffer, cpu) {
2041                         cpu_buffer = buffer->buffers[cpu];
2042                         if (!cpu_buffer->nr_pages_to_update)
2043                                 continue;
2044
2045                         if (cpu_online(cpu))
2046                                 wait_for_completion(&cpu_buffer->update_done);
2047                         cpu_buffer->nr_pages_to_update = 0;
2048                 }
2049
2050                 put_online_cpus();
2051         } else {
2052                 /* Make sure this CPU has been initialized */
2053                 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
2054                         goto out;
2055
2056                 cpu_buffer = buffer->buffers[cpu_id];
2057
2058                 if (nr_pages == cpu_buffer->nr_pages)
2059                         goto out;
2060
2061                 /*
2062                  * Don't succeed if resizing is disabled, as a reader might be
2063                  * manipulating the ring buffer and is expecting a sane state while
2064                  * this is true.
2065                  */
2066                 if (atomic_read(&cpu_buffer->resize_disabled)) {
2067                         err = -EBUSY;
2068                         goto out_err_unlock;
2069                 }
2070
2071                 cpu_buffer->nr_pages_to_update = nr_pages -
2072                                                 cpu_buffer->nr_pages;
2073
2074                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
2075                 if (cpu_buffer->nr_pages_to_update > 0 &&
2076                         __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
2077                                             &cpu_buffer->new_pages, cpu_id)) {
2078                         err = -ENOMEM;
2079                         goto out_err;
2080                 }
2081
2082                 get_online_cpus();
2083
2084                 /* Can't run something on an offline CPU. */
2085                 if (!cpu_online(cpu_id))
2086                         rb_update_pages(cpu_buffer);
2087                 else {
2088                         schedule_work_on(cpu_id,
2089                                          &cpu_buffer->update_pages_work);
2090                         wait_for_completion(&cpu_buffer->update_done);
2091                 }
2092
2093                 cpu_buffer->nr_pages_to_update = 0;
2094                 put_online_cpus();
2095         }
2096
2097  out:
2098         /*
2099          * The ring buffer resize can happen with the ring buffer
2100          * enabled, so that the update disturbs the tracing as little
2101          * as possible. But if the buffer is disabled, we do not need
2102          * to worry about that, and we can take the time to verify
2103          * that the buffer is not corrupt.
2104          */
2105         if (atomic_read(&buffer->record_disabled)) {
2106                 atomic_inc(&buffer->record_disabled);
2107                 /*
2108                  * Even though the buffer was disabled, we must make sure
2109                  * that it is truly disabled before calling rb_check_pages.
2110                  * There could have been a race between checking
2111                  * record_disable and incrementing it.
2112                  */
2113                 synchronize_rcu();
2114                 for_each_buffer_cpu(buffer, cpu) {
2115                         cpu_buffer = buffer->buffers[cpu];
2116                         rb_check_pages(cpu_buffer);
2117                 }
2118                 atomic_dec(&buffer->record_disabled);
2119         }
2120
2121         mutex_unlock(&buffer->mutex);
2122         return size;
2123
2124  out_err:
2125         for_each_buffer_cpu(buffer, cpu) {
2126                 struct buffer_page *bpage, *tmp;
2127
2128                 cpu_buffer = buffer->buffers[cpu];
2129                 cpu_buffer->nr_pages_to_update = 0;
2130
2131                 if (list_empty(&cpu_buffer->new_pages))
2132                         continue;
2133
2134                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2135                                         list) {
2136                         list_del_init(&bpage->list);
2137                         free_buffer_page(bpage);
2138                 }
2139         }
2140  out_err_unlock:
2141         mutex_unlock(&buffer->mutex);
2142         return err;
2143 }
2144 EXPORT_SYMBOL_GPL(ring_buffer_resize);
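/*
 * Usage sketch (illustrative only): resize every per-CPU buffer, or a
 * single one. The requested size is rounded up to whole buffer pages
 * and the adjusted size (or a negative errno) is returned:
 *
 *	ret = ring_buffer_resize(buf, 2 << 20, RING_BUFFER_ALL_CPUS);
 *	ret = ring_buffer_resize(buf, 2 << 20, 1);	// CPU 1 only
 */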
2145
2146 void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2147 {
2148         mutex_lock(&buffer->mutex);
2149         if (val)
2150                 buffer->flags |= RB_FL_OVERWRITE;
2151         else
2152                 buffer->flags &= ~RB_FL_OVERWRITE;
2153         mutex_unlock(&buffer->mutex);
2154 }
2155 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2156
2157 static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2158 {
2159         return bpage->page->data + index;
2160 }
2161
2162 static __always_inline struct ring_buffer_event *
2163 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
2164 {
2165         return __rb_page_index(cpu_buffer->reader_page,
2166                                cpu_buffer->reader_page->read);
2167 }
2168
2169 static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
2170 {
2171         return local_read(&bpage->page->commit);
2172 }
2173
2174 static struct ring_buffer_event *
2175 rb_iter_head_event(struct ring_buffer_iter *iter)
2176 {
2177         struct ring_buffer_event *event;
2178         struct buffer_page *iter_head_page = iter->head_page;
2179         unsigned long commit;
2180         unsigned length;
2181
2182         if (iter->head != iter->next_event)
2183                 return iter->event;
2184
2185         /*
2186          * When the writer goes across pages, it issues a cmpxchg which
2187          * is a mb(), which will synchronize with the rmb here.
2188          * (see rb_tail_page_update() and __rb_reserve_next())
2189          */
2190         commit = rb_page_commit(iter_head_page);
2191         smp_rmb();
2192         event = __rb_page_index(iter_head_page, iter->head);
2193         length = rb_event_length(event);
2194
2195         /*
2196          * READ_ONCE() doesn't work on functions and we don't want the
2197          * compiler doing any crazy optimizations with length.
2198          */
2199         barrier();
2200
2201         if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
2202                 /* Writer corrupted the read? */
2203                 goto reset;
2204
2205         memcpy(iter->event, event, length);
2206         /*
2207          * If the page stamp is still the same after this rmb() then the
2208          * event was safely copied without the writer entering the page.
2209          */
2210         smp_rmb();
2211
2212         /* Make sure the page didn't change since we read this */
2213         if (iter->page_stamp != iter_head_page->page->time_stamp ||
2214             commit > rb_page_commit(iter_head_page))
2215                 goto reset;
2216
2217         iter->next_event = iter->head + length;
2218         return iter->event;
2219  reset:
2220         /* Reset to the beginning */
2221         iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2222         iter->head = 0;
2223         iter->next_event = 0;
2224         iter->missed_events = 1;
2225         return NULL;
2226 }
2227
2228 /* Size is determined by what has been committed */
2229 static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2230 {
2231         return rb_page_commit(bpage);
2232 }
2233
2234 static __always_inline unsigned
2235 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2236 {
2237         return rb_page_commit(cpu_buffer->commit_page);
2238 }
2239
2240 static __always_inline unsigned
2241 rb_event_index(struct ring_buffer_event *event)
2242 {
2243         unsigned long addr = (unsigned long)event;
2244
2245         return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
2246 }
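/*
 * Example (descriptive only): events live in the data area of a
 * buffer page, which begins BUF_PAGE_HDR_SIZE bytes into the
 * (page-aligned) buffer_data_page, after its time_stamp/commit
 * header. An event at the very start of the data area therefore has
 * index 0, and the index is just the event's byte offset within that
 * data area.
 */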
2247
2248 static void rb_inc_iter(struct ring_buffer_iter *iter)
2249 {
2250         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2251
2252         /*
2253          * The iterator could be on the reader page (it starts there).
2254          * But the head could have moved, since the reader was
2255          * found. Check for this case and assign the iterator
2256          * to the head page instead of next.
2257          */
2258         if (iter->head_page == cpu_buffer->reader_page)
2259                 iter->head_page = rb_set_head_page(cpu_buffer);
2260         else
2261                 rb_inc_page(cpu_buffer, &iter->head_page);
2262
2263         iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2264         iter->head = 0;
2265         iter->next_event = 0;
2266 }
2267
2268 /*
2269  * rb_handle_head_page - writer hit the head page
2270  *
2271  * Returns: +1 to retry page
2272  *           0 to continue
2273  *          -1 on error
2274  */
2275 static int
2276 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2277                     struct buffer_page *tail_page,
2278                     struct buffer_page *next_page)
2279 {
2280         struct buffer_page *new_head;
2281         int entries;
2282         int type;
2283         int ret;
2284
2285         entries = rb_page_entries(next_page);
2286
2287         /*
2288          * The hard part is here. We need to move the head
2289          * forward, and protect against both readers on
2290          * other CPUs and writers coming in via interrupts.
2291          */
2292         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2293                                        RB_PAGE_HEAD);
2294
2295         /*
2296          * type can be one of four:
2297          *  NORMAL - an interrupt already moved it for us
2298          *  HEAD   - we are the first to get here.
2299          *  UPDATE - we are the interrupt interrupting
2300          *           a current move.
2301          *  MOVED  - a reader on another CPU moved the next
2302          *           pointer to its reader page. Give up
2303          *           and try again.
2304          */
2305
2306         switch (type) {
2307         case RB_PAGE_HEAD:
2308                 /*
2309                  * We changed the head to UPDATE, thus
2310                  * it is our responsibility to update
2311                  * the counters.
2312                  */
2313                 local_add(entries, &cpu_buffer->overrun);
2314                 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2315
2316                 /*
2317                  * The entries will be zeroed out when we move the
2318                  * tail page.
2319                  */
2320
2321                 /* still more to do */
2322                 break;
2323
2324         case RB_PAGE_UPDATE:
2325                 /*
2326                  * This is an interrupt that interrupted the
2327                  * previous update. Still more to do.
2328                  */
2329                 break;
2330         case RB_PAGE_NORMAL:
2331                 /*
2332                  * An interrupt came in before the update
2333                  * and processed this for us.
2334                  * Nothing left to do.
2335                  */
2336                 return 1;
2337         case RB_PAGE_MOVED:
2338                 /*
2339                  * The reader is on another CPU and just did
2340                  * a swap with our next_page.
2341                  * Try again.
2342                  */
2343                 return 1;
2344         default:
2345                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2346                 return -1;
2347         }
2348
2349         /*
2350          * Now that we are here, the old head pointer is
2351          * set to UPDATE. This will keep the reader from
2352          * swapping the head page with the reader page.
2353          * The reader (on another CPU) will spin till
2354          * we are finished.
2355          *
2356          * We just need to protect against interrupts
2357          * doing the job. We will set the next pointer
2358          * to HEAD. After that, we set the old pointer
2359          * to NORMAL, but only if it was HEAD before.
2360          * Otherwise we are an interrupt, and only
2361          * want the outermost commit to reset it.
2362          */
2363         new_head = next_page;
2364         rb_inc_page(cpu_buffer, &new_head);
2365
2366         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2367                                     RB_PAGE_NORMAL);
2368
2369         /*
2370          * Valid returns are:
2371          *  HEAD   - an interrupt came in and already set it.
2372          *  NORMAL - One of two things:
2373          *            1) We really set it.
2374          *            2) A bunch of interrupts came in and moved
2375          *               the page forward again.
2376          */
2377         switch (ret) {
2378         case RB_PAGE_HEAD:
2379         case RB_PAGE_NORMAL:
2380                 /* OK */
2381                 break;
2382         default:
2383                 RB_WARN_ON(cpu_buffer, 1);
2384                 return -1;
2385         }
2386
2387         /*
2388          * It is possible that an interrupt came in,
2389          * set the head up, then more interrupts came in
2390          * and moved it again. When we get back here,
2391          * the page would have been set to NORMAL but we
2392          * just set it back to HEAD.
2393          *
2394          * How do you detect this? Well, if that happened
2395          * the tail page would have moved.
2396          */
2397         if (ret == RB_PAGE_NORMAL) {
2398                 struct buffer_page *buffer_tail_page;
2399
2400                 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2401                 /*
2402                  * If the tail had moved past next, then we need
2403                  * to reset the pointer.
2404                  */
2405                 if (buffer_tail_page != tail_page &&
2406                     buffer_tail_page != next_page)
2407                         rb_head_page_set_normal(cpu_buffer, new_head,
2408                                                 next_page,
2409                                                 RB_PAGE_HEAD);
2410         }
2411
2412         /*
2413          * If this was the outer most commit (the one that
2414          * changed the original pointer from HEAD to UPDATE),
2415          * then it is up to us to reset it to NORMAL.
2416          */
2417         if (type == RB_PAGE_HEAD) {
2418                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2419                                               tail_page,
2420                                               RB_PAGE_UPDATE);
2421                 if (RB_WARN_ON(cpu_buffer,
2422                                ret != RB_PAGE_UPDATE))
2423                         return -1;
2424         }
2425
2426         return 0;
2427 }
2428
2429 static inline void
2430 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2431               unsigned long tail, struct rb_event_info *info)
2432 {
2433         struct buffer_page *tail_page = info->tail_page;
2434         struct ring_buffer_event *event;
2435         unsigned long length = info->length;
2436
2437         /*
2438          * Only the event that crossed the page boundary
2439          * must fill the old tail_page with padding.
2440          */
2441         if (tail >= BUF_PAGE_SIZE) {
2442                 /*
2443                  * If the page was filled, then we still need
2444                  * to update the real_end. Reset it to zero
2445                  * and the reader will ignore it.
2446                  */
2447                 if (tail == BUF_PAGE_SIZE)
2448                         tail_page->real_end = 0;
2449
2450                 local_sub(length, &tail_page->write);
2451                 return;
2452         }
2453
2454         event = __rb_page_index(tail_page, tail);
2455
2456         /* account for padding bytes */
2457         local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2458
2459         /*
2460          * Save the original length to the meta data.
2461          * This will be used by the reader to add lost event
2462          * counter.
2463          */
2464         tail_page->real_end = tail;
2465
2466         /*
2467          * If this event is bigger than the minimum size, then
2468          * we need to be careful that we don't subtract the
2469          * write counter enough to allow another writer to slip
2470          * in on this page.
2471          * We put in a discarded commit instead, to make sure
2472          * that this space is not used again.
2473          *
2474          * If we are less than the minimum size, we don't need to
2475          * worry about it.
2476          */
2477         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2478                 /* No room for any events */
2479
2480                 /* Mark the rest of the page with padding */
2481                 rb_event_set_padding(event);
2482
2483                 /* Set the write back to the previous setting */
2484                 local_sub(length, &tail_page->write);
2485                 return;
2486         }
2487
2488         /* Put in a discarded event */
2489         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2490         event->type_len = RINGBUF_TYPE_PADDING;
2491         /* time delta must be non zero */
2492         event->time_delta = 1;
2493
2494         /* Set write to end of buffer */
2495         length = (tail + length) - BUF_PAGE_SIZE;
2496         local_sub(length, &tail_page->write);
2497 }
2498
2499 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2500
2501 /*
2502  * This is the slow path, force gcc not to inline it.
2503  */
2504 static noinline struct ring_buffer_event *
2505 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2506              unsigned long tail, struct rb_event_info *info)
2507 {
2508         struct buffer_page *tail_page = info->tail_page;
2509         struct buffer_page *commit_page = cpu_buffer->commit_page;
2510         struct trace_buffer *buffer = cpu_buffer->buffer;
2511         struct buffer_page *next_page;
2512         int ret;
2513
2514         next_page = tail_page;
2515
2516         rb_inc_page(cpu_buffer, &next_page);
2517
2518         /*
2519          * If for some reason, we had an interrupt storm that made
2520          * it all the way around the buffer, bail, and warn
2521          * about it.
2522          */
2523         if (unlikely(next_page == commit_page)) {
2524                 local_inc(&cpu_buffer->commit_overrun);
2525                 goto out_reset;
2526         }
2527
2528         /*
2529          * This is where the fun begins!
2530          *
2531          * We are fighting against races between a reader that
2532          * could be on another CPU trying to swap its reader
2533          * page with the buffer head.
2534          *
2535          * We are also fighting against interrupts coming in and
2536          * moving the head or tail on us as well.
2537          *
2538          * If the next page is the head page then we have filled
2539          * the buffer, unless the commit page is still on the
2540          * reader page.
2541          */
2542         if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2543
2544                 /*
2545                  * If the commit is not on the reader page, then
2546                  * move the header page.
2547                  */
2548                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2549                         /*
2550                          * If we are not in overwrite mode,
2551                          * this is easy, just stop here.
2552                          */
2553                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
2554                                 local_inc(&cpu_buffer->dropped_events);
2555                                 goto out_reset;
2556                         }
2557
2558                         ret = rb_handle_head_page(cpu_buffer,
2559                                                   tail_page,
2560                                                   next_page);
2561                         if (ret < 0)
2562                                 goto out_reset;
2563                         if (ret)
2564                                 goto out_again;
2565                 } else {
2566                         /*
2567                          * We need to be careful here too. The
2568                          * commit page could still be on the reader
2569                          * page. We could have a small buffer, and
2570                          * have filled up the buffer with events
2571                          * from interrupts and such, and wrapped.
2572                          *
2573                          * Note, if the tail page is also on the
2574                          * reader_page, we let it move out.
2575                          */
2576                         if (unlikely((cpu_buffer->commit_page !=
2577                                       cpu_buffer->tail_page) &&
2578                                      (cpu_buffer->commit_page ==
2579                                       cpu_buffer->reader_page))) {
2580                                 local_inc(&cpu_buffer->commit_overrun);
2581                                 goto out_reset;
2582                         }
2583                 }
2584         }
2585
2586         rb_tail_page_update(cpu_buffer, tail_page, next_page);
2587
2588  out_again:
2589
2590         rb_reset_tail(cpu_buffer, tail, info);
2591
2592         /* Commit what we have for now. */
2593         rb_end_commit(cpu_buffer);
2594         /* rb_end_commit() decs committing */
2595         local_inc(&cpu_buffer->committing);
2596
2597         /* fail and let the caller try again */
2598         return ERR_PTR(-EAGAIN);
2599
2600  out_reset:
2601         /* reset write */
2602         rb_reset_tail(cpu_buffer, tail, info);
2603
2604         return NULL;
2605 }
2606
2607 /* Slow path */
2608 static struct ring_buffer_event *
2609 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
2610 {
2611         if (abs)
2612                 event->type_len = RINGBUF_TYPE_TIME_STAMP;
2613         else
2614                 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2615
2616         /* Not the first event on the page, or not delta? */
2617         if (abs || rb_event_index(event)) {
2618                 event->time_delta = delta & TS_MASK;
2619                 event->array[0] = delta >> TS_SHIFT;
2620         } else {
2621                 /* nope, just zero it */
2622                 event->time_delta = 0;
2623                 event->array[0] = 0;
2624         }
2625
2626         return skip_time_extend(event);
2627 }
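/*
 * Example (sketch, assuming the usual TS_SHIFT of 27): the delta is
 * split so that the low TS_SHIFT bits (delta & TS_MASK) land in
 * event->time_delta and the remaining high bits (delta >> TS_SHIFT)
 * land in event->array[0]. A time extend/stamp event can therefore
 * describe deltas far larger than the 27-bit field of a data event.
 */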
2628
2629 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2630                                      struct ring_buffer_event *event);
2631
2632 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2633 static inline bool sched_clock_stable(void)
2634 {
2635         return true;
2636 }
2637 #endif
2638
2639 static void
2640 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2641                    struct rb_event_info *info)
2642 {
2643         u64 write_stamp;
2644
2645         WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2646                   (unsigned long long)info->delta,
2647                   (unsigned long long)info->ts,
2648                   (unsigned long long)info->before,
2649                   (unsigned long long)info->after,
2650                   (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0),
2651                   sched_clock_stable() ? "" :
2652                   "If you just came from a suspend/resume,\n"
2653                   "please switch to the trace global clock:\n"
2654                   "  echo global > /sys/kernel/debug/tracing/trace_clock\n"
2655                   "or add trace_clock=global to the kernel command line\n");
2656 }
2657
2658 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2659                                       struct ring_buffer_event **event,
2660                                       struct rb_event_info *info,
2661                                       u64 *delta,
2662                                       unsigned int *length)
2663 {
2664         bool abs = info->add_timestamp &
2665                 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2666
2667         if (unlikely(info->delta > (1ULL << 59))) {
2668                 /* Did the clock go backwards? */
2669                 if (info->before == info->after && info->before > info->ts) {
2670                         /* not interrupted */
2671                         static int once;
2672
2673                         /*
2674                          * This is possible with a recalibrating of the TSC.
2675                          * Do not produce a call stack, but just report it.
2676                          */
2677                         if (!once) {
2678                                 once++;
2679                                 pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2680                                         info->before, info->ts);
2681                         }
2682                 } else
2683                         rb_check_timestamp(cpu_buffer, info);
2684                 if (!abs)
2685                         info->delta = 0;
2686         }
2687         *event = rb_add_time_stamp(*event, info->delta, abs);
2688         *length -= RB_LEN_TIME_EXTEND;
2689         *delta = 0;
2690 }
2691
2692 /**
2693  * rb_update_event - update event type and data
2694  * @cpu_buffer: The per cpu buffer of the @event
2695  * @event: the event to update
2696  * @info: The info to update the @event with (contains length and delta)
2697  *
2698  * Update the type and data fields of the @event. The length
2699  * is the actual size that is written to the ring buffer,
2700  * and with this, we can determine what to place into the
2701  * data field.
2702  */
2703 static void
2704 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2705                 struct ring_buffer_event *event,
2706                 struct rb_event_info *info)
2707 {
2708         unsigned length = info->length;
2709         u64 delta = info->delta;
2710
2711         /*
2712          * If we need to add a timestamp, then we
2713          * add it to the start of the reserved space.
2714          */
2715         if (unlikely(info->add_timestamp))
2716                 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2717
2718         event->time_delta = delta;
2719         length -= RB_EVNT_HDR_SIZE;
2720         if (length > RB_MAX_SMALL_DATA) {
2721                 event->type_len = 0;
2722                 event->array[0] = length;
2723         } else
2724                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2725 }
2726
2727 static unsigned rb_calculate_event_length(unsigned length)
2728 {
2729         struct ring_buffer_event event; /* Used only for sizeof array */
2730
2731         /* zero length can cause confusions */
2732         if (!length)
2733                 length++;
2734
2735         if (length > RB_MAX_SMALL_DATA)
2736                 length += sizeof(event.array[0]);
2737
2738         length += RB_EVNT_HDR_SIZE;
2739         length = ALIGN(length, RB_ALIGNMENT);
2740
2741         /*
2742          * In case the time delta is larger than the 27 bits for it
2743          * in the header, we need to add a timestamp. If another
2744          * event comes in when trying to discard this one to increase
2745          * the length, then the timestamp will be added in the allocated
2746          * space of this event. If length is bigger than the size needed
2747          * for the TIME_EXTEND, then padding has to be used. The events
2748          * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2749          * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2750          * As length is a multiple of 4, we only need to worry if it
2751          * is 12 (RB_LEN_TIME_EXTEND + 4).
2752          */
2753         if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2754                 length += RB_ALIGNMENT;
2755
2756         return length;
2757 }
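/*
 * Worked example (sketch, assuming a 4-byte event header, 4-byte
 * alignment and an 8-byte TIME_EXTEND event): a 3-byte payload
 * becomes 3 + 4 = 7, aligned up to 8. An 8-byte payload becomes
 * 8 + 4 = 12, which is exactly RB_LEN_TIME_EXTEND + RB_ALIGNMENT,
 * so it is bumped to 16 to satisfy the padding rule described above.
 */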
2758
2759 static __always_inline bool
2760 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2761                    struct ring_buffer_event *event)
2762 {
2763         unsigned long addr = (unsigned long)event;
2764         unsigned long index;
2765
2766         index = rb_event_index(event);
2767         addr &= PAGE_MASK;
2768
2769         return cpu_buffer->commit_page->page == (void *)addr &&
2770                 rb_commit_index(cpu_buffer) == index;
2771 }
2772
2773 static u64 rb_time_delta(struct ring_buffer_event *event)
2774 {
2775         switch (event->type_len) {
2776         case RINGBUF_TYPE_PADDING:
2777                 return 0;
2778
2779         case RINGBUF_TYPE_TIME_EXTEND:
2780                 return ring_buffer_event_time_stamp(event);
2781
2782         case RINGBUF_TYPE_TIME_STAMP:
2783                 return 0;
2784
2785         case RINGBUF_TYPE_DATA:
2786                 return event->time_delta;
2787         default:
2788                 return 0;
2789         }
2790 }
2791
2792 static inline int
2793 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2794                   struct ring_buffer_event *event)
2795 {
2796         unsigned long new_index, old_index;
2797         struct buffer_page *bpage;
2798         unsigned long index;
2799         unsigned long addr;
2800         u64 write_stamp;
2801         u64 delta;
2802
2803         new_index = rb_event_index(event);
2804         old_index = new_index + rb_event_ts_length(event);
2805         addr = (unsigned long)event;
2806         addr &= PAGE_MASK;
2807
2808         bpage = READ_ONCE(cpu_buffer->tail_page);
2809
2810         delta = rb_time_delta(event);
2811
2812         if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
2813                 return 0;
2814
2815         /* Make sure the write stamp is read before testing the location */
2816         barrier();
2817
2818         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2819                 unsigned long write_mask =
2820                         local_read(&bpage->write) & ~RB_WRITE_MASK;
2821                 unsigned long event_length = rb_event_length(event);
2822
2823                 /* Something came in, can't discard */
2824                 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
2825                                        write_stamp, write_stamp - delta))
2826                         return 0;
2827
2828                 /*
2829                  * If an event were to come in now, it would see that the
2830                  * write_stamp and the before_stamp are different, and assume
2831                  * that this event just added itself before updating
2832                  * the write stamp. The interrupting event will fix the
2833                  * write stamp for us, and use the before stamp as its delta.
2834                  */
2835
2836                 /*
2837                  * This is on the tail page. It is possible that
2838                  * a write could come in and move the tail page
2839                  * and write to the next page. That is fine
2840                  * because we just shorten what is on this page.
2841                  */
2842                 old_index += write_mask;
2843                 new_index += write_mask;
2844                 index = local_cmpxchg(&bpage->write, old_index, new_index);
2845                 if (index == old_index) {
2846                         /* update counters */
2847                         local_sub(event_length, &cpu_buffer->entries_bytes);
2848                         return 1;
2849                 }
2850         }
2851
2852         /* could not discard */
2853         return 0;
2854 }
2855
2856 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2857 {
2858         local_inc(&cpu_buffer->committing);
2859         local_inc(&cpu_buffer->commits);
2860 }
2861
2862 static __always_inline void
2863 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2864 {
2865         unsigned long max_count;
2866
2867         /*
2868          * We only race with interrupts and NMIs on this CPU.
2869          * If we own the commit event, then we can commit
2870          * all others that interrupted us, since the interruptions
2871          * are in stack format (they finish before they come
2872          * back to us). This allows us to do a simple loop to
2873          * assign the commit to the tail.
2874          */
2875  again:
2876         max_count = cpu_buffer->nr_pages * 100;
2877
2878         while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2879                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2880                         return;
2881                 if (RB_WARN_ON(cpu_buffer,
2882                                rb_is_reader_page(cpu_buffer->tail_page)))
2883                         return;
2884                 local_set(&cpu_buffer->commit_page->page->commit,
2885                           rb_page_write(cpu_buffer->commit_page));
2886                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2887                 /* add barrier to keep gcc from optimizing too much */
2888                 barrier();
2889         }
2890         while (rb_commit_index(cpu_buffer) !=
2891                rb_page_write(cpu_buffer->commit_page)) {
2892
2893                 local_set(&cpu_buffer->commit_page->page->commit,
2894                           rb_page_write(cpu_buffer->commit_page));
2895                 RB_WARN_ON(cpu_buffer,
2896                            local_read(&cpu_buffer->commit_page->page->commit) &
2897                            ~RB_WRITE_MASK);
2898                 barrier();
2899         }
2900
2901         /* again, keep gcc from optimizing */
2902         barrier();
2903
2904         /*
2905          * If an interrupt came in just after the first while loop
2906          * and pushed the tail page forward, we will be left with
2907          * a dangling commit that will never go forward.
2908          */
2909         if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2910                 goto again;
2911 }
2912
2913 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2914 {
2915         unsigned long commits;
2916
2917         if (RB_WARN_ON(cpu_buffer,
2918                        !local_read(&cpu_buffer->committing)))
2919                 return;
2920
2921  again:
2922         commits = local_read(&cpu_buffer->commits);
2923         /* synchronize with interrupts */
2924         barrier();
2925         if (local_read(&cpu_buffer->committing) == 1)
2926                 rb_set_commit_to_write(cpu_buffer);
2927
2928         local_dec(&cpu_buffer->committing);
2929
2930         /* synchronize with interrupts */
2931         barrier();
2932
2933         /*
2934          * Need to account for interrupts coming in between the
2935          * updating of the commit page and the clearing of the
2936          * committing counter.
2937          */
2938         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2939             !local_read(&cpu_buffer->committing)) {
2940                 local_inc(&cpu_buffer->committing);
2941                 goto again;
2942         }
2943 }
2944
2945 static inline void rb_event_discard(struct ring_buffer_event *event)
2946 {
2947         if (extended_time(event))
2948                 event = skip_time_extend(event);
2949
2950         /* array[0] holds the actual length for the discarded event */
2951         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2952         event->type_len = RINGBUF_TYPE_PADDING;
2953         /* time delta must be non zero */
2954         if (!event->time_delta)
2955                 event->time_delta = 1;
2956 }
2957
2958 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2959                       struct ring_buffer_event *event)
2960 {
2961         local_inc(&cpu_buffer->entries);
2962         rb_end_commit(cpu_buffer);
2963 }
2964
2965 static __always_inline void
2966 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2967 {
2968         size_t nr_pages;
2969         size_t dirty;
2970         size_t full;
2971
2972         if (buffer->irq_work.waiters_pending) {
2973                 buffer->irq_work.waiters_pending = false;
2974                 /* irq_work_queue() supplies its own memory barriers */
2975                 irq_work_queue(&buffer->irq_work.work);
2976         }
2977
2978         if (cpu_buffer->irq_work.waiters_pending) {
2979                 cpu_buffer->irq_work.waiters_pending = false;
2980                 /* irq_work_queue() supplies its own memory barriers */
2981                 irq_work_queue(&cpu_buffer->irq_work.work);
2982         }
2983
2984         if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
2985                 return;
2986
2987         if (cpu_buffer->reader_page == cpu_buffer->commit_page)
2988                 return;
2989
2990         if (!cpu_buffer->irq_work.full_waiters_pending)
2991                 return;
2992
2993         cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
2994
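        /*
         * shortest_full is the lowest "buffer percent" that any waiter
         * asked for.  Only let the wakeup through once more than that
         * percentage of the pages has been dirtied (e.g. a value of 50
         * means at least half of the pages must be dirty).
         */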
2995         full = cpu_buffer->shortest_full;
2996         nr_pages = cpu_buffer->nr_pages;
2997         dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
2998         if (full && nr_pages && (dirty * 100) <= full * nr_pages)
2999                 return;
3000
3001         cpu_buffer->irq_work.wakeup_full = true;
3002         cpu_buffer->irq_work.full_waiters_pending = false;
3003         /* irq_work_queue() supplies its own memory barriers */
3004         irq_work_queue(&cpu_buffer->irq_work.work);
3005 }
3006
3007 /*
3008  * The lock and unlock are done within a preempt disable section.
3009  * The current_context per_cpu variable can only be modified
3010  * by the current task between lock and unlock. But it can
3011  * be modified more than once via an interrupt. To pass this
3012  * information from the lock to the unlock without having to
3013  * access the 'in_interrupt()' functions again (which do show
3014  * a bit of overhead in something as critical as function tracing),
3015  * we use a bitmask trick.
3016  *
3017  *  bit 0 =  NMI context
3018  *  bit 1 =  IRQ context
3019  *  bit 2 =  SoftIRQ context
3020  *  bit 3 =  normal context.
3021  *
3022  * This works because this is the order of contexts that can
3023  * preempt other contexts. A SoftIRQ never preempts an IRQ
3024  * context.
3025  *
3026  * When the context is determined, the corresponding bit is
3027  * checked and set (if it was set, then a recursion of that context
3028  * happened).
3029  *
3030  * On unlock, we need to clear this bit. To do so, just subtract
3031  * 1 from the current_context and AND it to itself.
3032  *
3033  * (binary)
3034  *  101 - 1 = 100
3035  *  101 & 100 = 100 (clearing bit zero)
3036  *
3037  *  1010 - 1 = 1001
3038  *  1010 & 1001 = 1000 (clearing bit 1)
3039  *
3040  * The least significant bit can be cleared this way, and it
3041  * just so happens that it is the same bit corresponding to
3042  * the current context.
3043  */
3044
3045 static __always_inline int
3046 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3047 {
3048         unsigned int val = cpu_buffer->current_context;
3049         unsigned long pc = preempt_count();
3050         int bit;
3051
3052         if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
3053                 bit = RB_CTX_NORMAL;
3054         else
3055                 bit = pc & NMI_MASK ? RB_CTX_NMI :
3056                         pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
3057
3058         if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
3059                 return 1;
3060
3061         val |= (1 << (bit + cpu_buffer->nest));
3062         cpu_buffer->current_context = val;
3063
3064         return 0;
3065 }
3066
3067 static __always_inline void
3068 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3069 {
3070         cpu_buffer->current_context &=
3071                 cpu_buffer->current_context - (1 << cpu_buffer->nest);
3072 }
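
/*
 * Worked example of the unlock arithmetic above (with nest == 0):
 * normal context takes the lock (bit 3), then an IRQ nests on top of
 * it (bit 1):
 *
 *   lock   (normal): current_context = 1000
 *   lock   (IRQ)   : current_context = 1010
 *   unlock (IRQ)   : 1010 & (1010 - 1) = 1010 & 1001 = 1000
 *   unlock (normal): 1000 & (1000 - 1) = 1000 & 0111 = 0000
 *
 * Each unlock clears exactly the bit of the context that is unwinding,
 * because an interrupting context always finishes before the context
 * it interrupted resumes.
 */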
3073
3074 /* The recursive locking above uses 4 bits */
3075 #define NESTED_BITS 4
3076
3077 /**
3078  * ring_buffer_nest_start - Allow tracing while nested
3079  * @buffer: The ring buffer to modify
3080  *
3081  * The ring buffer has a safety mechanism to prevent recursion.
3082  * But there may be a case where a trace needs to be done while
3083  * tracing something else. In this case, calling this function
3084  * allows a nested ring_buffer_lock_reserve() to be made within
3085  * the currently active one.
3086  *
3087  * Call this function before calling another ring_buffer_lock_reserve() and
3088  * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3089  */
3090 void ring_buffer_nest_start(struct trace_buffer *buffer)
3091 {
3092         struct ring_buffer_per_cpu *cpu_buffer;
3093         int cpu;
3094
3095         /* Enabled by ring_buffer_nest_end() */
3096         preempt_disable_notrace();
3097         cpu = raw_smp_processor_id();
3098         cpu_buffer = buffer->buffers[cpu];
3099         /* This is the shift value for the above recursive locking */
3100         cpu_buffer->nest += NESTED_BITS;
3101 }
3102
3103 /**
3104  * ring_buffer_nest_end - End the nested tracing section
3105  * @buffer: The ring buffer to modify
3106  *
3107  * Must be called after ring_buffer_nest_start() and after the
3108  * ring_buffer_unlock_commit().
3109  */
3110 void ring_buffer_nest_end(struct trace_buffer *buffer)
3111 {
3112         struct ring_buffer_per_cpu *cpu_buffer;
3113         int cpu;
3114
3115         /* disabled by ring_buffer_nest_start() */
3116         cpu = raw_smp_processor_id();
3117         cpu_buffer = buffer->buffers[cpu];
3118         /* This is the shift value for the above recursive locking */
3119         cpu_buffer->nest -= NESTED_BITS;
3120         preempt_enable_notrace();
3121 }
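
/*
 * Illustrative sketch (hypothetical caller, not an API defined here):
 * how ring_buffer_nest_start()/ring_buffer_nest_end() bracket a nested
 * reservation made while an outer one is still uncommitted.  The
 * lengths and payloads are made up for the example.
 */
static inline void example_nested_write(struct trace_buffer *buffer)
{
	struct ring_buffer_event *outer, *inner;

	outer = ring_buffer_lock_reserve(buffer, 8);
	if (!outer)
		return;

	/* Tell the recursion check that a nested reserve is intentional */
	ring_buffer_nest_start(buffer);
	inner = ring_buffer_lock_reserve(buffer, 4);
	if (inner) {
		memset(ring_buffer_event_data(inner), 0, 4);
		ring_buffer_unlock_commit(buffer, inner);
	}
	ring_buffer_nest_end(buffer);

	memset(ring_buffer_event_data(outer), 0, 8);
	ring_buffer_unlock_commit(buffer, outer);
}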
3122
3123 /**
3124  * ring_buffer_unlock_commit - commit a reserved event
3125  * @buffer: The buffer to commit to
3126  * @event: The event pointer to commit.
3127  *
3128  * This commits the data to the ring buffer, and releases any locks held.
3129  *
3130  * Must be paired with ring_buffer_lock_reserve.
3131  */
3132 int ring_buffer_unlock_commit(struct trace_buffer *buffer,
3133                               struct ring_buffer_event *event)
3134 {
3135         struct ring_buffer_per_cpu *cpu_buffer;
3136         int cpu = raw_smp_processor_id();
3137
3138         cpu_buffer = buffer->buffers[cpu];
3139
3140         rb_commit(cpu_buffer, event);
3141
3142         rb_wakeups(buffer, cpu_buffer);
3143
3144         trace_recursive_unlock(cpu_buffer);
3145
3146         preempt_enable_notrace();
3147
3148         return 0;
3149 }
3150 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
3151
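/*
 * Overview of the lockless reservation below (the A..E markers in the
 * code): A reads the current write index of the tail page, B publishes
 * the event's timestamp in before_stamp, and C atomically reserves
 * space with local_add_return().  If the start of the reserved region
 * equals the index read at A, nothing interrupted us between A and C:
 * D then stores the timestamp into write_stamp and E re-reads
 * before_stamp to catch an interrupt that slipped in after C.  If they
 * differ, an interrupting event reserved space after A, and the slow
 * path recomputes the delta or falls back to a delta of zero.
 */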
3152 static struct ring_buffer_event *
3153 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3154                   struct rb_event_info *info)
3155 {
3156         struct ring_buffer_event *event;
3157         struct buffer_page *tail_page;
3158         unsigned long tail, write, w;
3159         bool a_ok;
3160         bool b_ok;
3161
3162         /* Don't let the compiler play games with cpu_buffer->tail_page */
3163         tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3164
3165  /*A*/  w = local_read(&tail_page->write) & RB_WRITE_MASK;
3166         barrier();
3167         b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3168         a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3169         barrier();
3170         info->ts = rb_time_stamp(cpu_buffer->buffer);
3171
3172         if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3173                 info->delta = info->ts;
3174         } else {
3175                 /*
3176                  * If interrupting an event time update, we may need an
3177                  * absolute timestamp.
3178                  * Don't bother if this is the start of a new page (w == 0).
3179                  */
3180                 if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
3181                         info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3182                         info->length += RB_LEN_TIME_EXTEND;
3183                 } else {
3184                         info->delta = info->ts - info->after;
3185                         if (unlikely(test_time_stamp(info->delta))) {
3186                                 info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3187                                 info->length += RB_LEN_TIME_EXTEND;
3188                         }
3189                 }
3190         }
3191
3192  /*B*/  rb_time_set(&cpu_buffer->before_stamp, info->ts);
3193
3194  /*C*/  write = local_add_return(info->length, &tail_page->write);
3195
3196         /* set write to only the index of the write */
3197         write &= RB_WRITE_MASK;
3198
3199         tail = write - info->length;
3200
3201         /* See if we shot past the end of this buffer page */
3202         if (unlikely(write > BUF_PAGE_SIZE)) {
3203                 if (tail != w) {
3204                         /* before and after may now be different, fix it up */
3205                         b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3206                         a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3207                         if (a_ok && b_ok && info->before != info->after)
3208                                 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
3209                                                       info->before, info->after);
3210                 }
3211                 return rb_move_tail(cpu_buffer, tail, info);
3212         }
3213
3214         if (likely(tail == w)) {
3215                 u64 save_before;
3216                 bool s_ok;
3217
3218                 /* Nothing interrupted us between A and C */
3219  /*D*/          rb_time_set(&cpu_buffer->write_stamp, info->ts);
3220                 barrier();
3221  /*E*/          s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
3222                 RB_WARN_ON(cpu_buffer, !s_ok);
3223                 if (likely(!(info->add_timestamp &
3224                              (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3225                         /* This did not interrupt any time update */
3226                         info->delta = info->ts - info->after;
3227                 else
3228                         /* Just use full timestamp for interrupting event */
3229                         info->delta = info->ts;
3230                 barrier();
3231                 if (unlikely(info->ts != save_before)) {
3232                         /* SLOW PATH - Interrupted between C and E */
3233
3234                         a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3235                         RB_WARN_ON(cpu_buffer, !a_ok);
3236
3237                         /* Write stamp must only go forward */
3238                         if (save_before > info->after) {
3239                                 /*
3240                                  * We do not care about the result, only that
3241                                  * it gets updated atomically.
3242                                  */
3243                                 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
3244                                                       info->after, save_before);
3245                         }
3246                 }
3247         } else {
3248                 u64 ts;
3249                 /* SLOW PATH - Interrupted between A and C */
3250                 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3251                 /* Was interrupted before here, write_stamp must be valid */
3252                 RB_WARN_ON(cpu_buffer, !a_ok);
3253                 ts = rb_time_stamp(cpu_buffer->buffer);
3254                 barrier();
3255  /*E*/          if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3256                     info->after < ts) {
3257                         /* Nothing came after this event between C and E */
3258                         info->delta = ts - info->after;
3259                         (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
3260                                               info->after, info->ts);
3261                         info->ts = ts;
3262                 } else {
3263                         /*
3264                          * Interrupted between C and E:
3265                          * Lost the previous events time stamp. Just set the
3266                          * delta to zero, and this will be the same time as
3267                          * the event this event interrupted. And the events that
3268                          * came after this will still be correct (as they would
3269                          * have built their delta on the previous event).
3270                          */
3271                         info->delta = 0;
3272                 }
3273                 info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3274         }
3275
3276         /*
3277          * If this is the first commit on the page, then it has the same
3278          * timestamp as the page itself.
3279          */
3280         if (unlikely(!tail && !(info->add_timestamp &
3281                                 (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3282                 info->delta = 0;
3283
3284         /* We reserved something on the buffer */
3285
3286         event = __rb_page_index(tail_page, tail);
3287         rb_update_event(cpu_buffer, event, info);
3288
3289         local_inc(&tail_page->entries);
3290
3291         /*
3292          * If this is the first commit on the page, then update
3293          * its timestamp.
3294          */
3295         if (unlikely(!tail))
3296                 tail_page->page->time_stamp = info->ts;
3297
3298         /* account for these added bytes */
3299         local_add(info->length, &cpu_buffer->entries_bytes);
3300
3301         return event;
3302 }
3303
3304 static __always_inline struct ring_buffer_event *
3305 rb_reserve_next_event(struct trace_buffer *buffer,
3306                       struct ring_buffer_per_cpu *cpu_buffer,
3307                       unsigned long length)
3308 {
3309         struct ring_buffer_event *event;
3310         struct rb_event_info info;
3311         int nr_loops = 0;
3312         int add_ts_default;
3313
3314         rb_start_commit(cpu_buffer);
3315         /* The commit page can not change after this */
3316
3317 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3318         /*
3319          * Due to the ability to swap a cpu buffer from a buffer
3320          * it is possible it was swapped before we committed.
3321          * (committing stops a swap). We check for it here and
3322          * if it happened, we have to fail the write.
3323          */
3324         barrier();
3325         if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3326                 local_dec(&cpu_buffer->committing);
3327                 local_dec(&cpu_buffer->commits);
3328                 return NULL;
3329         }
3330 #endif
3331
3332         info.length = rb_calculate_event_length(length);
3333
3334         if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3335                 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3336                 info.length += RB_LEN_TIME_EXTEND;
3337         } else {
3338                 add_ts_default = RB_ADD_STAMP_NONE;
3339         }
3340
3341  again:
3342         info.add_timestamp = add_ts_default;
3343         info.delta = 0;
3344
3345         /*
3346          * We allow for interrupts to reenter here and do a trace.
3347          * If one does, it will cause this original code to loop
3348          * back here. Even with heavy interrupts happening, this
3349          * should only happen a few times in a row. If this happens
3350          * 1000 times in a row, there must be either an interrupt
3351          * storm or we have something buggy.
3352          * Bail!
3353          */
3354         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3355                 goto out_fail;
3356
3357         event = __rb_reserve_next(cpu_buffer, &info);
3358
3359         if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3360                 if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3361                         info.length -= RB_LEN_TIME_EXTEND;
3362                 goto again;
3363         }
3364
3365         if (likely(event))
3366                 return event;
3367  out_fail:
3368         rb_end_commit(cpu_buffer);
3369         return NULL;
3370 }
3371
3372 /**
3373  * ring_buffer_lock_reserve - reserve a part of the buffer
3374  * @buffer: the ring buffer to reserve from
3375  * @length: the length of the data to reserve (excluding event header)
3376  *
3377  * Returns a reserved event on the ring buffer to copy directly to.
3378  * The user of this interface will need to get the body to write into
3379  * and can use the ring_buffer_event_data() interface.
3380  *
3381  * The length is the length of the data needed, not the event length
3382  * which also includes the event header.
3383  *
3384  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3385  * If NULL is returned, then nothing has been allocated or locked.
3386  */
3387 struct ring_buffer_event *
3388 ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3389 {
3390         struct ring_buffer_per_cpu *cpu_buffer;
3391         struct ring_buffer_event *event;
3392         int cpu;
3393
3394         /* If we are tracing schedule, we don't want to recurse */
3395         preempt_disable_notrace();
3396
3397         if (unlikely(atomic_read(&buffer->record_disabled)))
3398                 goto out;
3399
3400         cpu = raw_smp_processor_id();
3401
3402         if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3403                 goto out;
3404
3405         cpu_buffer = buffer->buffers[cpu];
3406
3407         if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3408                 goto out;
3409
3410         if (unlikely(length > BUF_MAX_DATA_SIZE))
3411                 goto out;
3412
3413         if (unlikely(trace_recursive_lock(cpu_buffer)))
3414                 goto out;
3415
3416         event = rb_reserve_next_event(buffer, cpu_buffer, length);
3417         if (!event)
3418                 goto out_unlock;
3419
3420         return event;
3421
3422  out_unlock:
3423         trace_recursive_unlock(cpu_buffer);
3424  out:
3425         preempt_enable_notrace();
3426         return NULL;
3427 }
3428 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
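
/*
 * Illustrative sketch (hypothetical caller, not an API defined here):
 * the reserve/copy/commit sequence described above.  "struct
 * example_entry" and its fields are invented for the example.
 */
struct example_entry {
	u32	id;
	u64	value;
};

static inline int example_reserve_commit(struct trace_buffer *buffer,
					 u32 id, u64 value)
{
	struct ring_buffer_event *event;
	struct example_entry *entry;

	/* length is the payload size only; the event header is added for us */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;	/* recording disabled, recursion, or too large */

	entry = ring_buffer_event_data(event);
	entry->id = id;
	entry->value = value;

	/* Must pair with the reserve above; this also re-enables preemption */
	return ring_buffer_unlock_commit(buffer, event);
}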
3429
3430 /*
3431  * Decrement the entries to the page that an event is on.
3432  * The event does not even need to exist, only the pointer
3433  * to the page it is on. This may only be called before the commit
3434  * takes place.
3435  */
3436 static inline void
3437 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3438                    struct ring_buffer_event *event)
3439 {
3440         unsigned long addr = (unsigned long)event;
3441         struct buffer_page *bpage = cpu_buffer->commit_page;
3442         struct buffer_page *start;
3443
3444         addr &= PAGE_MASK;
3445
3446         /* Do the likely case first */
3447         if (likely(bpage->page == (void *)addr)) {
3448                 local_dec(&bpage->entries);
3449                 return;
3450         }
3451
3452         /*
3453          * Because the commit page may be on the reader page we
3454          * start with the next page and check for the end of the loop there.
3455          */
3456         rb_inc_page(cpu_buffer, &bpage);
3457         start = bpage;
3458         do {
3459                 if (bpage->page == (void *)addr) {
3460                         local_dec(&bpage->entries);
3461                         return;
3462                 }
3463                 rb_inc_page(cpu_buffer, &bpage);
3464         } while (bpage != start);
3465
3466         /* commit not part of this buffer?? */
3467         RB_WARN_ON(cpu_buffer, 1);
3468 }
3469
3470 /**
3471  * ring_buffer_commit_discard - discard an event that has not been committed
3472  * @buffer: the ring buffer
3473  * @event: non committed event to discard
3474  *
3475  * Sometimes an event that is in the ring buffer needs to be ignored.
3476  * This function lets the user discard an event in the ring buffer
3477  * and then that event will not be read later.
3478  *
3479  * This function only works if it is called before the item has been
3480  * committed. It will try to free the event from the ring buffer
3481  * if another event has not been added behind it.
3482  *
3483  * If another event has been added behind it, it will set the event
3484  * up as discarded, and perform the commit.
3485  *
3486  * If this function is called, do not call ring_buffer_unlock_commit on
3487  * the event.
3488  */
3489 void ring_buffer_discard_commit(struct trace_buffer *buffer,
3490                                 struct ring_buffer_event *event)
3491 {
3492         struct ring_buffer_per_cpu *cpu_buffer;
3493         int cpu;
3494
3495         /* The event is discarded regardless */
3496         rb_event_discard(event);
3497
3498         cpu = smp_processor_id();
3499         cpu_buffer = buffer->buffers[cpu];
3500
3501         /*
3502          * This must only be called if the event has not been
3503          * committed yet. Thus we can assume that preemption
3504          * is still disabled.
3505          */
3506         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3507
3508         rb_decrement_entry(cpu_buffer, event);
3509         if (rb_try_to_discard(cpu_buffer, event))
3510                 goto out;
3511
3512  out:
3513         rb_end_commit(cpu_buffer);
3514
3515         trace_recursive_unlock(cpu_buffer);
3516
3517         preempt_enable_notrace();
3518
3519 }
3520 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
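
/*
 * Illustrative sketch (hypothetical caller, not an API defined here):
 * reserving an event and then deciding not to keep it.  Once
 * ring_buffer_discard_commit() is called, the event must not also be
 * passed to ring_buffer_unlock_commit().
 */
static inline void example_reserve_maybe_discard(struct trace_buffer *buffer,
						 bool keep)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, 16);
	if (!event)
		return;

	if (keep) {
		memset(ring_buffer_event_data(event), 0, 16);
		ring_buffer_unlock_commit(buffer, event);
	} else {
		/*
		 * Frees the space if nothing was written behind it,
		 * otherwise turns the event into padding and commits.
		 */
		ring_buffer_discard_commit(buffer, event);
	}
}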
3521
3522 /**
3523  * ring_buffer_write - write data to the buffer without reserving
3524  * @buffer: The ring buffer to write to.
3525  * @length: The length of the data being written (excluding the event header)
3526  * @data: The data to write to the buffer.
3527  *
3528  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3529  * one function. If you already have the data to write to the buffer, it
3530  * may be easier to simply call this function.
3531  *
3532  * Note, like ring_buffer_lock_reserve, the length is the length of the data
3533  * and not the length of the event which would hold the header.
3534  */
3535 int ring_buffer_write(struct trace_buffer *buffer,
3536                       unsigned long length,
3537                       void *data)
3538 {
3539         struct ring_buffer_per_cpu *cpu_buffer;
3540         struct ring_buffer_event *event;
3541         void *body;
3542         int ret = -EBUSY;
3543         int cpu;
3544
3545         preempt_disable_notrace();
3546
3547         if (atomic_read(&buffer->record_disabled))
3548                 goto out;
3549
3550         cpu = raw_smp_processor_id();
3551
3552         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3553                 goto out;
3554
3555         cpu_buffer = buffer->buffers[cpu];
3556
3557         if (atomic_read(&cpu_buffer->record_disabled))
3558                 goto out;
3559
3560         if (length > BUF_MAX_DATA_SIZE)
3561                 goto out;
3562
3563         if (unlikely(trace_recursive_lock(cpu_buffer)))
3564                 goto out;
3565
3566         event = rb_reserve_next_event(buffer, cpu_buffer, length);
3567         if (!event)
3568                 goto out_unlock;
3569
3570         body = rb_event_data(event);
3571
3572         memcpy(body, data, length);
3573
3574         rb_commit(cpu_buffer, event);
3575
3576         rb_wakeups(buffer, cpu_buffer);
3577
3578         ret = 0;
3579
3580  out_unlock:
3581         trace_recursive_unlock(cpu_buffer);
3582
3583  out:
3584         preempt_enable_notrace();
3585
3586         return ret;
3587 }
3588 EXPORT_SYMBOL_GPL(ring_buffer_write);
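
/*
 * Illustrative sketch (hypothetical caller, not an API defined here):
 * writing pre-built data in a single call instead of reserve + commit.
 * The payload is invented for the example.
 */
static inline int example_write(struct trace_buffer *buffer, u64 a, u64 b)
{
	u64 payload[2] = { a, b };

	/* length is the data length; returns 0 on success, -EBUSY on failure */
	return ring_buffer_write(buffer, sizeof(payload), payload);
}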
3589
3590 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3591 {
3592         struct buffer_page *reader = cpu_buffer->reader_page;
3593         struct buffer_page *head = rb_set_head_page(cpu_buffer);
3594         struct buffer_page *commit = cpu_buffer->commit_page;
3595
3596         /* In case of error, head will be NULL */
3597         if (unlikely(!head))
3598                 return true;
3599
3600         return reader->read == rb_page_commit(reader) &&
3601                 (commit == reader ||
3602                  (commit == head &&
3603                   head->read == rb_page_commit(commit)));
3604 }
3605
3606 /**
3607  * ring_buffer_record_disable - stop all writes into the buffer
3608  * @buffer: The ring buffer to stop writes to.
3609  *
3610  * This prevents all writes to the buffer. Any attempt to write
3611  * to the buffer after this will fail and return NULL.
3612  *
3613  * The caller should call synchronize_rcu() after this.
3614  */
3615 void ring_buffer_record_disable(struct trace_buffer *buffer)
3616 {
3617         atomic_inc(&buffer->record_disabled);
3618 }
3619 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3620
3621 /**
3622  * ring_buffer_record_enable - enable writes to the buffer
3623  * @buffer: The ring buffer to enable writes
3624  *
3625  * Note, multiple disables will need the same number of enables
3626  * to truly enable the writing (much like preempt_disable).
3627  */
3628 void ring_buffer_record_enable(struct trace_buffer *buffer)
3629 {
3630         atomic_dec(&buffer->record_disabled);
3631 }
3632 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3633
3634 /**
3635  * ring_buffer_record_off - stop all writes into the buffer
3636  * @buffer: The ring buffer to stop writes to.
3637  *
3638  * This prevents all writes to the buffer. Any attempt to write
3639  * to the buffer after this will fail and return NULL.
3640  *
3641  * This is different than ring_buffer_record_disable() as
3642  * it works like an on/off switch, whereas the disable() version
3643  * must be paired with an enable().
3644  */
3645 void ring_buffer_record_off(struct trace_buffer *buffer)
3646 {
3647         unsigned int rd;
3648         unsigned int new_rd;
3649
3650         do {
3651                 rd = atomic_read(&buffer->record_disabled);
3652                 new_rd = rd | RB_BUFFER_OFF;
3653         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3654 }
3655 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3656
3657 /**
3658  * ring_buffer_record_on - restart writes into the buffer
3659  * @buffer: The ring buffer to start writes to.
3660  *
3661  * This enables all writes to the buffer that was disabled by
3662  * ring_buffer_record_off().
3663  *
3664  * This is different than ring_buffer_record_enable() as
3665  * it works like an on/off switch, whereas the enable() version
3666  * must be paired with a disable().
3667  */
3668 void ring_buffer_record_on(struct trace_buffer *buffer)
3669 {
3670         unsigned int rd;
3671         unsigned int new_rd;
3672
3673         do {
3674                 rd = atomic_read(&buffer->record_disabled);
3675                 new_rd = rd & ~RB_BUFFER_OFF;
3676         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3677 }
3678 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3679
3680 /**
3681  * ring_buffer_record_is_on - return true if the ring buffer can write
3682  * @buffer: The ring buffer to see if write is enabled
3683  *
3684  * Returns true if the ring buffer is in a state that it accepts writes.
3685  */
3686 bool ring_buffer_record_is_on(struct trace_buffer *buffer)
3687 {
3688         return !atomic_read(&buffer->record_disabled);
3689 }
3690
3691 /**
3692  * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
3693  * @buffer: The ring buffer to see if write is set enabled
3694  *
3695  * Returns true if the ring buffer is set writable by ring_buffer_record_on().
3696  * Note that this does NOT mean it is in a writable state.
3697  *
3698  * It may return true when the ring buffer has been disabled by
3699  * ring_buffer_record_disable(), as that is a temporary disabling of
3700  * the ring buffer.
3701  */
3702 bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
3703 {
3704         return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
3705 }
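
/*
 * Illustrative sketch (hypothetical caller, not an API defined here):
 * the switch style (record_off/record_on) and the counting style
 * (record_disable/record_enable) live in the same word but do not
 * cancel each other out.
 */
static inline void example_record_toggles(struct trace_buffer *buffer)
{
	ring_buffer_record_off(buffer);		/* switch: set RB_BUFFER_OFF */
	ring_buffer_record_disable(buffer);	/* counter: +1 */
	ring_buffer_record_enable(buffer);	/* counter: back to 0 */

	/* Still off: the RB_BUFFER_OFF bit survived the disable/enable pair */
	if (!ring_buffer_record_is_set_on(buffer))
		ring_buffer_record_on(buffer);	/* flip the switch back on */
}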
3706
3707 /**
3708  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3709  * @buffer: The ring buffer to stop writes to.
3710  * @cpu: The CPU buffer to stop
3711  *
3712  * This prevents all writes to the buffer. Any attempt to write
3713  * to the buffer after this will fail and return NULL.
3714  *
3715  * The caller should call synchronize_rcu() after this.
3716  */
3717 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
3718 {
3719         struct ring_buffer_per_cpu *cpu_buffer;
3720
3721         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3722                 return;
3723
3724         cpu_buffer = buffer->buffers[cpu];
3725         atomic_inc(&cpu_buffer->record_disabled);
3726 }
3727 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3728
3729 /**
3730  * ring_buffer_record_enable_cpu - enable writes to the buffer
3731  * @buffer: The ring buffer to enable writes
3732  * @cpu: The CPU to enable.
3733  *
3734  * Note, multiple disables will need the same number of enables
3735  * to truly enable the writing (much like preempt_disable).
3736  */
3737 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
3738 {
3739         struct ring_buffer_per_cpu *cpu_buffer;
3740
3741         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3742                 return;
3743
3744         cpu_buffer = buffer->buffers[cpu];
3745         atomic_dec(&cpu_buffer->record_disabled);
3746 }
3747 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3748
3749 /*
3750  * The total entries in the ring buffer is the running counter
3751  * of entries entered into the ring buffer, minus the sum of
3752  * the entries read from the ring buffer and the number of
3753  * entries that were overwritten.
3754  */
3755 static inline unsigned long
3756 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3757 {
3758         return local_read(&cpu_buffer->entries) -
3759                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3760 }
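
/*
 * Worked example of the computation above: if 100 events have been
 * written, 15 of them were lost to the writer wrapping around and 25
 * have already been read, then 100 - (15 + 25) = 60 entries remain.
 */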
3761
3762 /**
3763  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3764  * @buffer: The ring buffer
3765  * @cpu: The per CPU buffer to read from.
3766  */
3767 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
3768 {
3769         unsigned long flags;
3770         struct ring_buffer_per_cpu *cpu_buffer;
3771         struct buffer_page *bpage;
3772         u64 ret = 0;
3773
3774         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3775                 return 0;
3776
3777         cpu_buffer = buffer->buffers[cpu];
3778         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3779         /*
3780          * if the tail is on reader_page, oldest time stamp is on the reader
3781          * page
3782          */
3783         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3784                 bpage = cpu_buffer->reader_page;
3785         else
3786                 bpage = rb_set_head_page(cpu_buffer);
3787         if (bpage)
3788                 ret = bpage->page->time_stamp;
3789         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3790
3791         return ret;
3792 }
3793 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3794
3795 /**
3796  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3797  * @buffer: The ring buffer
3798  * @cpu: The per CPU buffer to read from.
3799  */
3800 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
3801 {
3802         struct ring_buffer_per_cpu *cpu_buffer;
3803         unsigned long ret;
3804
3805         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3806                 return 0;
3807
3808         cpu_buffer = buffer->buffers[cpu];
3809         ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3810
3811         return ret;
3812 }
3813 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3814
3815 /**
3816  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3817  * @buffer: The ring buffer
3818  * @cpu: The per CPU buffer to get the entries from.
3819  */
3820 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
3821 {
3822         struct ring_buffer_per_cpu *cpu_buffer;
3823
3824         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3825                 return 0;
3826
3827         cpu_buffer = buffer->buffers[cpu];
3828
3829         return rb_num_of_entries(cpu_buffer);
3830 }
3831 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3832
3833 /**
3834  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3835  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3836  * @buffer: The ring buffer
3837  * @cpu: The per CPU buffer to get the number of overruns from
3838  */
3839 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
3840 {
3841         struct ring_buffer_per_cpu *cpu_buffer;
3842         unsigned long ret;
3843
3844         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3845                 return 0;
3846
3847         cpu_buffer = buffer->buffers[cpu];
3848         ret = local_read(&cpu_buffer->overrun);
3849
3850         return ret;
3851 }
3852 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3853
3854 /**
3855  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3856  * commits failing due to the buffer wrapping around while there are uncommitted
3857  * events, such as during an interrupt storm.
3858  * @buffer: The ring buffer
3859  * @cpu: The per CPU buffer to get the number of overruns from
3860  */
3861 unsigned long
3862 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
3863 {
3864         struct ring_buffer_per_cpu *cpu_buffer;
3865         unsigned long ret;
3866
3867         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3868                 return 0;
3869
3870         cpu_buffer = buffer->buffers[cpu];
3871         ret = local_read(&cpu_buffer->commit_overrun);
3872
3873         return ret;
3874 }
3875 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3876
3877 /**
3878  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3879  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3880  * @buffer: The ring buffer
3881  * @cpu: The per CPU buffer to get the number of overruns from
3882  */
3883 unsigned long
3884 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
3885 {
3886         struct ring_buffer_per_cpu *cpu_buffer;
3887         unsigned long ret;
3888
3889         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3890                 return 0;
3891
3892         cpu_buffer = buffer->buffers[cpu];
3893         ret = local_read(&cpu_buffer->dropped_events);
3894
3895         return ret;
3896 }
3897 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3898
3899 /**
3900  * ring_buffer_read_events_cpu - get the number of events successfully read
3901  * @buffer: The ring buffer
3902  * @cpu: The per CPU buffer to get the number of events read
3903  */
3904 unsigned long
3905 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
3906 {
3907         struct ring_buffer_per_cpu *cpu_buffer;
3908
3909         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3910                 return 0;
3911
3912         cpu_buffer = buffer->buffers[cpu];
3913         return cpu_buffer->read;
3914 }
3915 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3916
3917 /**
3918  * ring_buffer_entries - get the number of entries in a buffer
3919  * @buffer: The ring buffer
3920  *
3921  * Returns the total number of entries in the ring buffer
3922  * (all CPU entries)
3923  */
3924 unsigned long ring_buffer_entries(struct trace_buffer *buffer)
3925 {
3926         struct ring_buffer_per_cpu *cpu_buffer;
3927         unsigned long entries = 0;
3928         int cpu;
3929
3930         /* if you care about this being correct, lock the buffer */
3931         for_each_buffer_cpu(buffer, cpu) {
3932                 cpu_buffer = buffer->buffers[cpu];
3933                 entries += rb_num_of_entries(cpu_buffer);
3934         }
3935
3936         return entries;
3937 }
3938 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3939
3940 /**
3941  * ring_buffer_overruns - get the number of overruns in buffer
3942  * @buffer: The ring buffer
3943  *
3944  * Returns the total number of overruns in the ring buffer
3945  * (all CPU entries)
3946  */
3947 unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
3948 {
3949         struct ring_buffer_per_cpu *cpu_buffer;
3950         unsigned long overruns = 0;
3951         int cpu;
3952
3953         /* if you care about this being correct, lock the buffer */
3954         for_each_buffer_cpu(buffer, cpu) {
3955                 cpu_buffer = buffer->buffers[cpu];
3956                 overruns += local_read(&cpu_buffer->overrun);
3957         }
3958
3959         return overruns;
3960 }
3961 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3962
3963 static void rb_iter_reset(struct ring_buffer_iter *iter)
3964 {
3965         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3966
3967         /* Iterator usage is expected to have record disabled */
3968         iter->head_page = cpu_buffer->reader_page;
3969         iter->head = cpu_buffer->reader_page->read;
3970         iter->next_event = iter->head;
3971
3972         iter->cache_reader_page = iter->head_page;
3973         iter->cache_read = cpu_buffer->read;
3974
3975         if (iter->head) {
3976                 iter->read_stamp = cpu_buffer->read_stamp;
3977                 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
3978         } else {
3979                 iter->read_stamp = iter->head_page->page->time_stamp;
3980                 iter->page_stamp = iter->read_stamp;
3981         }
3982 }
3983
3984 /**
3985  * ring_buffer_iter_reset - reset an iterator
3986  * @iter: The iterator to reset
3987  *
3988  * Resets the iterator, so that it will start from the beginning
3989  * again.
3990  */
3991 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3992 {
3993         struct ring_buffer_per_cpu *cpu_buffer;
3994         unsigned long flags;
3995
3996         if (!iter)
3997                 return;
3998
3999         cpu_buffer = iter->cpu_buffer;
4000
4001         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4002         rb_iter_reset(iter);
4003         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4004 }
4005 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4006
4007 /**
4008  * ring_buffer_iter_empty - check if an iterator has no more to read
4009  * @iter: The iterator to check
4010  */
4011 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4012 {
4013         struct ring_buffer_per_cpu *cpu_buffer;
4014         struct buffer_page *reader;
4015         struct buffer_page *head_page;
4016         struct buffer_page *commit_page;
4017         struct buffer_page *curr_commit_page;
4018         unsigned commit;
4019         u64 curr_commit_ts;
4020         u64 commit_ts;
4021
4022         cpu_buffer = iter->cpu_buffer;
4023         reader = cpu_buffer->reader_page;
4024         head_page = cpu_buffer->head_page;
4025         commit_page = cpu_buffer->commit_page;
4026         commit_ts = commit_page->page->time_stamp;
4027
4028         /*
4029          * When the writer goes across pages, it issues a cmpxchg which
4030          * is a mb(), which will synchronize with the rmb here.
4031          * (see rb_tail_page_update())
4032          */
4033         smp_rmb();
4034         commit = rb_page_commit(commit_page);
4035         /* We want to make sure that the commit page doesn't change */
4036         smp_rmb();
4037
4038         /* Make sure commit page didn't change */
4039         curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4040         curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4041
4042         /* If the commit page changed, then there's more data */
4043         if (curr_commit_page != commit_page ||
4044             curr_commit_ts != commit_ts)
4045                 return 0;
4046
4047         /* Still racy, as it may return a false positive, but that's OK */
4048         return ((iter->head_page == commit_page && iter->head >= commit) ||
4049                 (iter->head_page == reader && commit_page == head_page &&
4050                  head_page->read == commit &&
4051                  iter->head == rb_page_commit(cpu_buffer->reader_page)));
4052 }
4053 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4054
4055 static void
4056 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4057                      struct ring_buffer_event *event)
4058 {
4059         u64 delta;
4060
4061         switch (event->type_len) {
4062         case RINGBUF_TYPE_PADDING:
4063                 return;
4064
4065         case RINGBUF_TYPE_TIME_EXTEND:
4066                 delta = ring_buffer_event_time_stamp(event);
4067                 cpu_buffer->read_stamp += delta;
4068                 return;
4069
4070         case RINGBUF_TYPE_TIME_STAMP:
4071                 delta = ring_buffer_event_time_stamp(event);
4072                 cpu_buffer->read_stamp = delta;
4073                 return;
4074
4075         case RINGBUF_TYPE_DATA:
4076                 cpu_buffer->read_stamp += event->time_delta;
4077                 return;
4078
4079         default:
4080                 RB_WARN_ON(cpu_buffer, 1);
4081         }
4082         return;
4083 }
4084
4085 static void
4086 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4087                           struct ring_buffer_event *event)
4088 {
4089         u64 delta;
4090
4091         switch (event->type_len) {
4092         case RINGBUF_TYPE_PADDING:
4093                 return;
4094
4095         case RINGBUF_TYPE_TIME_EXTEND:
4096                 delta = ring_buffer_event_time_stamp(event);
4097                 iter->read_stamp += delta;
4098                 return;
4099
4100         case RINGBUF_TYPE_TIME_STAMP:
4101                 delta = ring_buffer_event_time_stamp(event);
4102                 iter->read_stamp = delta;
4103                 return;
4104
4105         case RINGBUF_TYPE_DATA:
4106                 iter->read_stamp += event->time_delta;
4107                 return;
4108
4109         default:
4110                 RB_WARN_ON(iter->cpu_buffer, 1);
4111         }
4112         return;
4113 }
4114
4115 static struct buffer_page *
4116 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4117 {
4118         struct buffer_page *reader = NULL;
4119         unsigned long overwrite;
4120         unsigned long flags;
4121         int nr_loops = 0;
4122         int ret;
4123
4124         local_irq_save(flags);
4125         arch_spin_lock(&cpu_buffer->lock);
4126
4127  again:
4128         /*
4129          * This should normally only loop twice. But because the
4130          * start of the reader inserts an empty page, it causes
4131          * a case where we will loop three times. There should be no
4132          * reason to loop four times (that I know of).
4133          */
4134         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4135                 reader = NULL;
4136                 goto out;
4137         }
4138
4139         reader = cpu_buffer->reader_page;
4140
4141         /* If there's more to read, return this page */
4142         if (cpu_buffer->reader_page->read < rb_page_size(reader))
4143                 goto out;
4144
4145         /* Never should we have an index greater than the size */
4146         if (RB_WARN_ON(cpu_buffer,
4147                        cpu_buffer->reader_page->read > rb_page_size(reader)))
4148                 goto out;
4149
4150         /* check if we caught up to the tail */
4151         reader = NULL;
4152         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4153                 goto out;
4154
4155         /* Don't bother swapping if the ring buffer is empty */
4156         if (rb_num_of_entries(cpu_buffer) == 0)
4157                 goto out;
4158
4159         /*
4160          * Reset the reader page to size zero.
4161          */
4162         local_set(&cpu_buffer->reader_page->write, 0);
4163         local_set(&cpu_buffer->reader_page->entries, 0);
4164         local_set(&cpu_buffer->reader_page->page->commit, 0);
4165         cpu_buffer->reader_page->real_end = 0;
4166
4167  spin:
4168         /*
4169          * Splice the empty reader page into the list around the head.
4170          */
4171         reader = rb_set_head_page(cpu_buffer);
4172         if (!reader)
4173                 goto out;
4174         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4175         cpu_buffer->reader_page->list.prev = reader->list.prev;
4176
4177         /*
4178          * cpu_buffer->pages just needs to point to the buffer, it
4179          *  has no specific buffer page to point to. Let's move it out
4180          *  of our way so we don't accidentally swap it.
4181          */
4182         cpu_buffer->pages = reader->list.prev;
4183
4184         /* The reader page will be pointing to the new head */
4185         rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
4186
4187         /*
4188          * We want to make sure we read the overruns after we set up our
4189          * pointers to the next object. The writer side does a
4190          * cmpxchg to cross pages which acts as the mb on the writer
4191          * side. Note, the reader will constantly fail the swap
4192          * while the writer is updating the pointers, so this
4193          * guarantees that the overwrite recorded here is the one we
4194          * want to compare with the last_overrun.
4195          */
4196         smp_mb();
4197         overwrite = local_read(&(cpu_buffer->overrun));
4198
4199         /*
4200          * Here's the tricky part.
4201          *
4202          * We need to move the pointer past the header page.
4203          * But we can only do that if a writer is not currently
4204          * moving it. The page before the header page has the
4205          * flag bit '1' set if it is pointing to the page we want,
4206          * but if the writer is in the process of moving it
4207          * then it will be '2', or '0' if it has already been moved.
4208          */
4209
4210         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4211
4212         /*
4213          * If we did not convert it, then we must try again.
4214          */
4215         if (!ret)
4216                 goto spin;
4217
4218         /*
4219          * Yay! We succeeded in replacing the page.
4220          *
4221          * Now make the new head point back to the reader page.
4222          */
4223         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4224         rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
4225
4226         local_inc(&cpu_buffer->pages_read);
4227
4228         /* Finally update the reader page to the new head */
4229         cpu_buffer->reader_page = reader;
4230         cpu_buffer->reader_page->read = 0;
4231
4232         if (overwrite != cpu_buffer->last_overrun) {
4233                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4234                 cpu_buffer->last_overrun = overwrite;
4235         }
4236
4237         goto again;
4238
4239  out:
4240         /* Update the read_stamp on the first event */
4241         if (reader && reader->read == 0)
4242                 cpu_buffer->read_stamp = reader->page->time_stamp;
4243
4244         arch_spin_unlock(&cpu_buffer->lock);
4245         local_irq_restore(flags);
4246
4247         return reader;
4248 }
4249
4250 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4251 {
4252         struct ring_buffer_event *event;
4253         struct buffer_page *reader;
4254         unsigned length;
4255
4256         reader = rb_get_reader_page(cpu_buffer);
4257
4258         /* This function should not be called when buffer is empty */
4259         if (RB_WARN_ON(cpu_buffer, !reader))
4260                 return;
4261
4262         event = rb_reader_event(cpu_buffer);
4263
4264         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4265                 cpu_buffer->read++;
4266
4267         rb_update_read_stamp(cpu_buffer, event);
4268
4269         length = rb_event_length(event);
4270         cpu_buffer->reader_page->read += length;
4271 }
4272
4273 static void rb_advance_iter(struct ring_buffer_iter *iter)
4274 {
4275         struct ring_buffer_per_cpu *cpu_buffer;
4276
4277         cpu_buffer = iter->cpu_buffer;
4278
4279         /* If head == next_event then we need to jump to the next event */
4280         if (iter->head == iter->next_event) {
4281                 /* If the event gets overwritten again, there's nothing to do */
4282                 if (rb_iter_head_event(iter) == NULL)
4283                         return;
4284         }
4285
4286         iter->head = iter->next_event;
4287
4288         /*
4289          * Check if we are at the end of the buffer.
4290          */
4291         if (iter->next_event >= rb_page_size(iter->head_page)) {
4292                 /* discarded commits can make the page empty */
4293                 if (iter->head_page == cpu_buffer->commit_page)
4294                         return;
4295                 rb_inc_iter(iter);
4296                 return;
4297         }
4298
4299         rb_update_iter_read_stamp(iter, iter->event);
4300 }
4301
4302 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4303 {
4304         return cpu_buffer->lost_events;
4305 }
4306
4307 static struct ring_buffer_event *
4308 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4309                unsigned long *lost_events)
4310 {
4311         struct ring_buffer_event *event;
4312         struct buffer_page *reader;
4313         int nr_loops = 0;
4314
4315         if (ts)
4316                 *ts = 0;
4317  again:
4318         /*
4319          * We repeat when a time extend is encountered.
4320          * Since the time extend is always attached to a data event,
4321          * we should never loop more than once.
4322          * (We never hit the following condition more than twice).
4323          */
4324         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4325                 return NULL;
4326
4327         reader = rb_get_reader_page(cpu_buffer);
4328         if (!reader)
4329                 return NULL;
4330
4331         event = rb_reader_event(cpu_buffer);
4332
4333         switch (event->type_len) {
4334         case RINGBUF_TYPE_PADDING:
4335                 if (rb_null_event(event))
4336                         RB_WARN_ON(cpu_buffer, 1);
4337                 /*
4338                  * Because the writer could be discarding every
4339                  * event it creates (which would probably be bad)
4340                  * if we were to go back to "again" then we may never
4341                  * catch up, and will trigger the warn on, or lock
4342                  * the box. Return the padding, and we will release
4343                  * the current locks, and try again.
4344                  */
4345                 return event;
4346
4347         case RINGBUF_TYPE_TIME_EXTEND:
4348                 /* Internal data, OK to advance */
4349                 rb_advance_reader(cpu_buffer);
4350                 goto again;
4351
4352         case RINGBUF_TYPE_TIME_STAMP:
4353                 if (ts) {
4354                         *ts = ring_buffer_event_time_stamp(event);
4355                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4356                                                          cpu_buffer->cpu, ts);
4357                 }
4358                 /* Internal data, OK to advance */
4359                 rb_advance_reader(cpu_buffer);
4360                 goto again;
4361
4362         case RINGBUF_TYPE_DATA:
4363                 if (ts && !(*ts)) {
4364                         *ts = cpu_buffer->read_stamp + event->time_delta;
4365                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4366                                                          cpu_buffer->cpu, ts);
4367                 }
4368                 if (lost_events)
4369                         *lost_events = rb_lost_events(cpu_buffer);
4370                 return event;
4371
4372         default:
4373                 RB_WARN_ON(cpu_buffer, 1);
4374         }
4375
4376         return NULL;
4377 }
4378 EXPORT_SYMBOL_GPL(ring_buffer_peek);
4379
4380 static struct ring_buffer_event *
4381 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4382 {
4383         struct trace_buffer *buffer;
4384         struct ring_buffer_per_cpu *cpu_buffer;
4385         struct ring_buffer_event *event;
4386         int nr_loops = 0;
4387
4388         if (ts)
4389                 *ts = 0;
4390
4391         cpu_buffer = iter->cpu_buffer;
4392         buffer = cpu_buffer->buffer;
4393
4394         /*
4395          * Check if someone performed a consuming read to
4396          * the buffer. A consuming read invalidates the iterator
4397          * and we need to reset the iterator in this case.
4398          */
4399         if (unlikely(iter->cache_read != cpu_buffer->read ||
4400                      iter->cache_reader_page != cpu_buffer->reader_page))
4401                 rb_iter_reset(iter);
4402
4403  again:
4404         if (ring_buffer_iter_empty(iter))
4405                 return NULL;
4406
4407         /*
4408          * As the writer can mess with what the iterator is trying
4409          * to read, just give up if we fail to get an event after
4410          * three tries. The iterator is not as reliable as a consuming
4411          * read when the ring buffer is being actively written to.
4412          * Do not warn when the three failures are reached.
4413          */
4414         if (++nr_loops > 3)
4415                 return NULL;
4416
4417         if (rb_per_cpu_empty(cpu_buffer))
4418                 return NULL;
4419
4420         if (iter->head >= rb_page_size(iter->head_page)) {
4421                 rb_inc_iter(iter);
4422                 goto again;
4423         }
4424
4425         event = rb_iter_head_event(iter);
4426         if (!event)
4427                 goto again;
4428
4429         switch (event->type_len) {
4430         case RINGBUF_TYPE_PADDING:
4431                 if (rb_null_event(event)) {
4432                         rb_inc_iter(iter);
4433                         goto again;
4434                 }
4435                 rb_advance_iter(iter);
4436                 return event;
4437
4438         case RINGBUF_TYPE_TIME_EXTEND:
4439                 /* Internal data, OK to advance */
4440                 rb_advance_iter(iter);
4441                 goto again;
4442
4443         case RINGBUF_TYPE_TIME_STAMP:
4444                 if (ts) {
4445                         *ts = ring_buffer_event_time_stamp(event);
4446                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4447                                                          cpu_buffer->cpu, ts);
4448                 }
4449                 /* Internal data, OK to advance */
4450                 rb_advance_iter(iter);
4451                 goto again;
4452
4453         case RINGBUF_TYPE_DATA:
4454                 if (ts && !(*ts)) {
4455                         *ts = iter->read_stamp + event->time_delta;
4456                         ring_buffer_normalize_time_stamp(buffer,
4457                                                          cpu_buffer->cpu, ts);
4458                 }
4459                 return event;
4460
4461         default:
4462                 RB_WARN_ON(cpu_buffer, 1);
4463         }
4464
4465         return NULL;
4466 }
4467 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4468
4469 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4470 {
4471         if (likely(!in_nmi())) {
4472                 raw_spin_lock(&cpu_buffer->reader_lock);
4473                 return true;
4474         }
4475
4476         /*
4477          * If an NMI die dumps out the content of the ring buffer,
4478          * trylock must be used to prevent a deadlock if the NMI
4479          * preempted a task that holds the ring buffer locks. If
4480          * we get the lock then all is fine, if not, then continue
4481          * to do the read, but this can corrupt the ring buffer,
4482          * so it must be permanently disabled from future writes.
4483          * Reading from NMI is a oneshot deal.
4484          */
4485         if (raw_spin_trylock(&cpu_buffer->reader_lock))
4486                 return true;
4487
4488         /* Continue without locking, but disable the ring buffer */
4489         atomic_inc(&cpu_buffer->record_disabled);
4490         return false;
4491 }
4492
4493 static inline void
4494 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4495 {
4496         if (likely(locked))
4497                 raw_spin_unlock(&cpu_buffer->reader_lock);
4498         return;
4499 }
4500
4501 /**
4502  * ring_buffer_peek - peek at the next event to be read
4503  * @buffer: The ring buffer to read
4504  * @cpu: The CPU to peek at
4505  * @ts: The timestamp counter of this event.
4506  * @lost_events: a variable to store if events were lost (may be NULL)
4507  *
4508  * This will return the event that will be read next, but does
4509  * not consume the data.
4510  */
4511 struct ring_buffer_event *
4512 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4513                  unsigned long *lost_events)
4514 {
4515         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4516         struct ring_buffer_event *event;
4517         unsigned long flags;
4518         bool dolock;
4519
4520         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4521                 return NULL;
4522
4523  again:
4524         local_irq_save(flags);
4525         dolock = rb_reader_lock(cpu_buffer);
4526         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4527         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4528                 rb_advance_reader(cpu_buffer);
4529         rb_reader_unlock(cpu_buffer, dolock);
4530         local_irq_restore(flags);
4531
4532         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4533                 goto again;
4534
4535         return event;
4536 }
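
/*
 * A minimal usage sketch for the non-consuming peek, assuming a
 * hypothetical caller that already has a valid @buffer and an online
 * @cpu. Because the read position is not advanced, a second call
 * would return the same event again:
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event)
 *		pr_info("next event: len=%u ts=%llu lost=%lu\n",
 *			ring_buffer_event_length(event),
 *			(unsigned long long)ts, lost);
 */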
4537
4538 /**
 * ring_buffer_iter_dropped - report if there are dropped events
4539  * @iter: The ring buffer iterator
4540  *
4541  * Returns true if there were dropped events since the last peek.
4542  */
4543 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4544 {
4545         bool ret = iter->missed_events != 0;
4546
4547         iter->missed_events = 0;
4548         return ret;
4549 }
4550 EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4551
4552 /**
4553  * ring_buffer_iter_peek - peek at the next event to be read
4554  * @iter: The ring buffer iterator
4555  * @ts: The timestamp counter of this event.
4556  *
4557  * This will return the event that will be read next, but does
4558  * not increment the iterator.
4559  */
4560 struct ring_buffer_event *
4561 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4562 {
4563         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4564         struct ring_buffer_event *event;
4565         unsigned long flags;
4566
4567  again:
4568         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4569         event = rb_iter_peek(iter, ts);
4570         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4571
4572         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4573                 goto again;
4574
4575         return event;
4576 }
4577
4578 /**
4579  * ring_buffer_consume - return an event and consume it
4580  * @buffer: The ring buffer to get the next event from
4581  * @cpu: the cpu to read the buffer from
4582  * @ts: a variable to store the timestamp (may be NULL)
4583  * @lost_events: a variable to store if events were lost (may be NULL)
4584  *
4585  * Returns the next event in the ring buffer, and that event is consumed.
4586  * Meaning that sequential reads will keep returning a different event,
4587  * and eventually empty the ring buffer if the producer is slower.
4588  */
4589 struct ring_buffer_event *
4590 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
4591                     unsigned long *lost_events)
4592 {
4593         struct ring_buffer_per_cpu *cpu_buffer;
4594         struct ring_buffer_event *event = NULL;
4595         unsigned long flags;
4596         bool dolock;
4597
4598  again:
4599         /* might be called in atomic */
4600         preempt_disable();
4601
4602         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4603                 goto out;
4604
4605         cpu_buffer = buffer->buffers[cpu];
4606         local_irq_save(flags);
4607         dolock = rb_reader_lock(cpu_buffer);
4608
4609         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4610         if (event) {
4611                 cpu_buffer->lost_events = 0;
4612                 rb_advance_reader(cpu_buffer);
4613         }
4614
4615         rb_reader_unlock(cpu_buffer, dolock);
4616         local_irq_restore(flags);
4617
4618  out:
4619         preempt_enable();
4620
4621         if (event && event->type_len == RINGBUF_TYPE_PADDING)
4622                 goto again;
4623
4624         return event;
4625 }
4626 EXPORT_SYMBOL_GPL(ring_buffer_consume);
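
/*
 * A minimal usage sketch for a consuming read, assuming a hypothetical
 * helper that drains one CPU buffer. Each successful call returns the
 * next event and advances past it, so the loop ends once the buffer is
 * empty:
 *
 *	static void example_drain_cpu(struct trace_buffer *buffer, int cpu)
 *	{
 *		struct ring_buffer_event *event;
 *		unsigned long lost;
 *		u64 ts;
 *
 *		while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *			void *data = ring_buffer_event_data(event);
 *			size_t len = ring_buffer_event_length(event);
 *
 *			example_process(data, len, ts, lost);
 *		}
 *	}
 *
 * Here example_drain_cpu() and example_process() are made-up names;
 * only the ring_buffer_*() calls come from this file.
 */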
4627
4628 /**
4629  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4630  * @buffer: The ring buffer to read from
4631  * @cpu: The cpu buffer to iterate over
4632  * @flags: gfp flags to use for memory allocation
4633  *
4634  * This performs the initial preparations necessary to iterate
4635  * through the buffer.  Memory is allocated, buffer recording
4636  * is disabled, and the iterator pointer is returned to the caller.
4637  *
4638  * Disabling buffer recording prevents the reading from being
4639  * corrupted. This is not a consuming read, so a producer is not
4640  * expected.
4641  *
4642  * After a sequence of ring_buffer_read_prepare calls, the user is
4643  * expected to make at least one call to ring_buffer_read_prepare_sync.
4644  * Afterwards, ring_buffer_read_start is invoked to get things going
4645  * for real.
4646  *
4647  * This overall must be paired with ring_buffer_read_finish.
4648  */
4649 struct ring_buffer_iter *
4650 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
4651 {
4652         struct ring_buffer_per_cpu *cpu_buffer;
4653         struct ring_buffer_iter *iter;
4654
4655         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4656                 return NULL;
4657
4658         iter = kzalloc(sizeof(*iter), flags);
4659         if (!iter)
4660                 return NULL;
4661
4662         iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
4663         if (!iter->event) {
4664                 kfree(iter);
4665                 return NULL;
4666         }
4667
4668         cpu_buffer = buffer->buffers[cpu];
4669
4670         iter->cpu_buffer = cpu_buffer;
4671
4672         atomic_inc(&cpu_buffer->resize_disabled);
4673
4674         return iter;
4675 }
4676 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4677
4678 /**
4679  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4680  *
4681  * All previously invoked ring_buffer_read_prepare calls to prepare
4682  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4683  * calls on those iterators are allowed.
4684  */
4685 void
4686 ring_buffer_read_prepare_sync(void)
4687 {
4688         synchronize_rcu();
4689 }
4690 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4691
4692 /**
4693  * ring_buffer_read_start - start a non consuming read of the buffer
4694  * @iter: The iterator returned by ring_buffer_read_prepare
4695  *
4696  * This finalizes the startup of an iteration through the buffer.
4697  * The iterator comes from a call to ring_buffer_read_prepare and
4698  * an intervening ring_buffer_read_prepare_sync must have been
4699  * performed.
4700  *
4701  * Must be paired with ring_buffer_read_finish.
4702  */
4703 void
4704 ring_buffer_read_start(struct ring_buffer_iter *iter)
4705 {
4706         struct ring_buffer_per_cpu *cpu_buffer;
4707         unsigned long flags;
4708
4709         if (!iter)
4710                 return;
4711
4712         cpu_buffer = iter->cpu_buffer;
4713
4714         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4715         arch_spin_lock(&cpu_buffer->lock);
4716         rb_iter_reset(iter);
4717         arch_spin_unlock(&cpu_buffer->lock);
4718         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4719 }
4720 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4721
4722 /**
4723  * ring_buffer_read_finish - finish reading the iterator of the buffer
4724  * @iter: The iterator retrieved by ring_buffer_read_prepare
4725  *
4726  * This re-enables the recording to the buffer, and frees the
4727  * iterator.
4728  */
4729 void
4730 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4731 {
4732         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4733         unsigned long flags;
4734
4735         /*
4736          * Ring buffer is disabled from recording, here's a good place
4737          * to check the integrity of the ring buffer.
4738          * Must prevent readers from trying to read, as the check
4739          * clears the HEAD page and readers require it.
4740          */
4741         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4742         rb_check_pages(cpu_buffer);
4743         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4744
4745         atomic_dec(&cpu_buffer->resize_disabled);
4746         kfree(iter->event);
4747         kfree(iter);
4748 }
4749 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
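
/*
 * A sketch of the whole non-consuming sequence, assuming a hypothetical
 * caller that wants to walk one CPU buffer without consuming it. The
 * prepare/sync/start calls must eventually be paired with
 * ring_buffer_read_finish(), and the events themselves are walked with
 * ring_buffer_iter_peek() plus ring_buffer_iter_advance():
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		if (ring_buffer_iter_dropped(iter))
 *			pr_info("events dropped before this point\n");
 *		example_process(ring_buffer_event_data(event), ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *
 *	ring_buffer_read_finish(iter);
 *
 * Here buffer, cpu and example_process() are assumed to exist in the
 * caller; everything else is the API defined above.
 */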
4750
4751 /**
4752  * ring_buffer_iter_advance - advance the iterator to the next location
4753  * @iter: The ring buffer iterator
4754  *
4755  * Move the location of the iterator such that the next read will
4756  * be the next location of the iterator.
4757  */
4758 void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
4759 {
4760         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4761         unsigned long flags;
4762
4763         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4764
4765         rb_advance_iter(iter);
4766
4767         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4768 }
4769 EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
4770
4771 /**
4772  * ring_buffer_size - return the size of the ring buffer (in bytes)
4773  * @buffer: The ring buffer.
4774  * @cpu: The CPU to get ring buffer size from.
4775  */
4776 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
4777 {
4778         /*
4779          * Earlier, this method returned
4780          *      BUF_PAGE_SIZE * buffer->nr_pages
4781          * Since the nr_pages field is now removed, we have converted this to
4782          * return the per cpu buffer value.
4783          */
4784         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4785                 return 0;
4786
4787         return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4788 }
4789 EXPORT_SYMBOL_GPL(ring_buffer_size);
4790
4791 static void
4792 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4793 {
4794         rb_head_page_deactivate(cpu_buffer);
4795
4796         cpu_buffer->head_page
4797                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4798         local_set(&cpu_buffer->head_page->write, 0);
4799         local_set(&cpu_buffer->head_page->entries, 0);
4800         local_set(&cpu_buffer->head_page->page->commit, 0);
4801
4802         cpu_buffer->head_page->read = 0;
4803
4804         cpu_buffer->tail_page = cpu_buffer->head_page;
4805         cpu_buffer->commit_page = cpu_buffer->head_page;
4806
4807         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4808         INIT_LIST_HEAD(&cpu_buffer->new_pages);
4809         local_set(&cpu_buffer->reader_page->write, 0);
4810         local_set(&cpu_buffer->reader_page->entries, 0);
4811         local_set(&cpu_buffer->reader_page->page->commit, 0);
4812         cpu_buffer->reader_page->read = 0;
4813
4814         local_set(&cpu_buffer->entries_bytes, 0);
4815         local_set(&cpu_buffer->overrun, 0);
4816         local_set(&cpu_buffer->commit_overrun, 0);
4817         local_set(&cpu_buffer->dropped_events, 0);
4818         local_set(&cpu_buffer->entries, 0);
4819         local_set(&cpu_buffer->committing, 0);
4820         local_set(&cpu_buffer->commits, 0);
4821         local_set(&cpu_buffer->pages_touched, 0);
4822         local_set(&cpu_buffer->pages_read, 0);
4823         cpu_buffer->last_pages_touch = 0;
4824         cpu_buffer->shortest_full = 0;
4825         cpu_buffer->read = 0;
4826         cpu_buffer->read_bytes = 0;
4827
4828         rb_time_set(&cpu_buffer->write_stamp, 0);
4829         rb_time_set(&cpu_buffer->before_stamp, 0);
4830
4831         cpu_buffer->lost_events = 0;
4832         cpu_buffer->last_overrun = 0;
4833
4834         rb_head_page_activate(cpu_buffer);
4835 }
4836
4837 /* Must have disabled the cpu buffer then done a synchronize_rcu */
4838 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
4839 {
4840         unsigned long flags;
4841
4842         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4843
4844         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4845                 goto out;
4846
4847         arch_spin_lock(&cpu_buffer->lock);
4848
4849         rb_reset_cpu(cpu_buffer);
4850
4851         arch_spin_unlock(&cpu_buffer->lock);
4852
4853  out:
4854         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4855 }
4856
4857 /**
4858  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4859  * @buffer: The ring buffer to reset a per cpu buffer of
4860  * @cpu: The CPU buffer to be reset
4861  */
4862 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
4863 {
4864         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4865
4866         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4867                 return;
4868
4869         atomic_inc(&cpu_buffer->resize_disabled);
4870         atomic_inc(&cpu_buffer->record_disabled);
4871
4872         /* Make sure all commits have finished */
4873         synchronize_rcu();
4874
4875         reset_disabled_cpu_buffer(cpu_buffer);
4876
4877         atomic_dec(&cpu_buffer->record_disabled);
4878         atomic_dec(&cpu_buffer->resize_disabled);
4879 }
4880 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4881
4882 /**
4883  * ring_buffer_reset_online_cpus - reset the online per CPU buffers of a ring buffer
4884  * @buffer: The ring buffer to reset a per cpu buffer of
4886  */
4887 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
4888 {
4889         struct ring_buffer_per_cpu *cpu_buffer;
4890         int cpu;
4891
4892         for_each_online_buffer_cpu(buffer, cpu) {
4893                 cpu_buffer = buffer->buffers[cpu];
4894
4895                 atomic_inc(&cpu_buffer->resize_disabled);
4896                 atomic_inc(&cpu_buffer->record_disabled);
4897         }
4898
4899         /* Make sure all commits have finished */
4900         synchronize_rcu();
4901
4902         for_each_online_buffer_cpu(buffer, cpu) {
4903                 cpu_buffer = buffer->buffers[cpu];
4904
4905                 reset_disabled_cpu_buffer(cpu_buffer);
4906
4907                 atomic_dec(&cpu_buffer->record_disabled);
4908                 atomic_dec(&cpu_buffer->resize_disabled);
4909         }
4910 }
4911
4912 /**
4913  * ring_buffer_reset - reset a ring buffer
4914  * @buffer: The ring buffer to reset all cpu buffers
4915  */
4916 void ring_buffer_reset(struct trace_buffer *buffer)
4917 {
4918         struct ring_buffer_per_cpu *cpu_buffer;
4919         int cpu;
4920
4921         for_each_buffer_cpu(buffer, cpu) {
4922                 cpu_buffer = buffer->buffers[cpu];
4923
4924                 atomic_inc(&cpu_buffer->resize_disabled);
4925                 atomic_inc(&cpu_buffer->record_disabled);
4926         }
4927
4928         /* Make sure all commits have finished */
4929         synchronize_rcu();
4930
4931         for_each_buffer_cpu(buffer, cpu) {
4932                 cpu_buffer = buffer->buffers[cpu];
4933
4934                 reset_disabled_cpu_buffer(cpu_buffer);
4935
4936                 atomic_dec(&cpu_buffer->record_disabled);
4937                 atomic_dec(&cpu_buffer->resize_disabled);
4938         }
4939 }
4940 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4941
4942 /**
4943  * ring_buffer_empty - is the ring buffer empty?
4944  * @buffer: The ring buffer to test
4945  */
4946 bool ring_buffer_empty(struct trace_buffer *buffer)
4947 {
4948         struct ring_buffer_per_cpu *cpu_buffer;
4949         unsigned long flags;
4950         bool dolock;
4951         int cpu;
4952         int ret;
4953
4954         /* yes this is racy, but if you don't like the race, lock the buffer */
4955         for_each_buffer_cpu(buffer, cpu) {
4956                 cpu_buffer = buffer->buffers[cpu];
4957                 local_irq_save(flags);
4958                 dolock = rb_reader_lock(cpu_buffer);
4959                 ret = rb_per_cpu_empty(cpu_buffer);
4960                 rb_reader_unlock(cpu_buffer, dolock);
4961                 local_irq_restore(flags);
4962
4963                 if (!ret)
4964                         return false;
4965         }
4966
4967         return true;
4968 }
4969 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4970
4971 /**
4972  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4973  * @buffer: The ring buffer
4974  * @cpu: The CPU buffer to test
4975  */
4976 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
4977 {
4978         struct ring_buffer_per_cpu *cpu_buffer;
4979         unsigned long flags;
4980         bool dolock;
4981         int ret;
4982
4983         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4984                 return true;
4985
4986         cpu_buffer = buffer->buffers[cpu];
4987         local_irq_save(flags);
4988         dolock = rb_reader_lock(cpu_buffer);
4989         ret = rb_per_cpu_empty(cpu_buffer);
4990         rb_reader_unlock(cpu_buffer, dolock);
4991         local_irq_restore(flags);
4992
4993         return ret;
4994 }
4995 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4996
4997 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4998 /**
4999  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5000  * @buffer_a: One buffer to swap with
5001  * @buffer_b: The other buffer to swap with
5002  * @cpu: the CPU of the buffers to swap
5003  *
5004  * This function is useful for tracers that want to take a "snapshot"
5005  * of a CPU buffer and have another backup buffer lying around.
5006  * It is expected that the tracer handles the cpu buffer not being
5007  * used at the moment.
5008  */
5009 int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5010                          struct trace_buffer *buffer_b, int cpu)
5011 {
5012         struct ring_buffer_per_cpu *cpu_buffer_a;
5013         struct ring_buffer_per_cpu *cpu_buffer_b;
5014         int ret = -EINVAL;
5015
5016         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5017             !cpumask_test_cpu(cpu, buffer_b->cpumask))
5018                 goto out;
5019
5020         cpu_buffer_a = buffer_a->buffers[cpu];
5021         cpu_buffer_b = buffer_b->buffers[cpu];
5022
5023         /* At least make sure the two buffers are somewhat the same */
5024         if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5025                 goto out;
5026
5027         ret = -EAGAIN;
5028
5029         if (atomic_read(&buffer_a->record_disabled))
5030                 goto out;
5031
5032         if (atomic_read(&buffer_b->record_disabled))
5033                 goto out;
5034
5035         if (atomic_read(&cpu_buffer_a->record_disabled))
5036                 goto out;
5037
5038         if (atomic_read(&cpu_buffer_b->record_disabled))
5039                 goto out;
5040
5041         /*
5042          * We can't do a synchronize_rcu here because this
5043          * function can be called in atomic context.
5044          * Normally this will be called from the same CPU as cpu.
5045          * If not it's up to the caller to protect this.
5046          */
5047         atomic_inc(&cpu_buffer_a->record_disabled);
5048         atomic_inc(&cpu_buffer_b->record_disabled);
5049
5050         ret = -EBUSY;
5051         if (local_read(&cpu_buffer_a->committing))
5052                 goto out_dec;
5053         if (local_read(&cpu_buffer_b->committing))
5054                 goto out_dec;
5055
5056         buffer_a->buffers[cpu] = cpu_buffer_b;
5057         buffer_b->buffers[cpu] = cpu_buffer_a;
5058
5059         cpu_buffer_b->buffer = buffer_a;
5060         cpu_buffer_a->buffer = buffer_b;
5061
5062         ret = 0;
5063
5064 out_dec:
5065         atomic_dec(&cpu_buffer_a->record_disabled);
5066         atomic_dec(&cpu_buffer_b->record_disabled);
5067 out:
5068         return ret;
5069 }
5070 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
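
/*
 * A sketch of a "snapshot" style swap, assuming a hypothetical tracer
 * that keeps a spare trace_buffer (snap_buffer) sized like the live one
 * (live_buffer). After a successful swap, the old live per CPU buffer
 * can be read at leisure through snap_buffer while new events go into
 * the pages that used to belong to the spare:
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(live_buffer, snap_buffer,
 *				   smp_processor_id());
 *	if (err)
 *		pr_warn("snapshot swap failed: %d\n", err);
 *
 * live_buffer and snap_buffer are made-up names for two buffers the
 * caller allocated with the same per CPU size.
 */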
5071 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
5072
5073 /**
5074  * ring_buffer_alloc_read_page - allocate a page to read from buffer
5075  * @buffer: the buffer to allocate for.
5076  * @cpu: the cpu buffer to allocate.
5077  *
5078  * This function is used in conjunction with ring_buffer_read_page.
5079  * When reading a full page from the ring buffer, these functions
5080  * can be used to speed up the process. The calling function should
5081  * allocate a few pages first with this function. Then when it
5082  * needs to get pages from the ring buffer, it passes the result
5083  * of this function into ring_buffer_read_page, which will swap
5084  * the page that was allocated, with the read page of the buffer.
5085  *
5086  * Returns:
5087  *  The page allocated, or ERR_PTR
5088  */
5089 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5090 {
5091         struct ring_buffer_per_cpu *cpu_buffer;
5092         struct buffer_data_page *bpage = NULL;
5093         unsigned long flags;
5094         struct page *page;
5095
5096         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5097                 return ERR_PTR(-ENODEV);
5098
5099         cpu_buffer = buffer->buffers[cpu];
5100         local_irq_save(flags);
5101         arch_spin_lock(&cpu_buffer->lock);
5102
5103         if (cpu_buffer->free_page) {
5104                 bpage = cpu_buffer->free_page;
5105                 cpu_buffer->free_page = NULL;
5106         }
5107
5108         arch_spin_unlock(&cpu_buffer->lock);
5109         local_irq_restore(flags);
5110
5111         if (bpage)
5112                 goto out;
5113
5114         page = alloc_pages_node(cpu_to_node(cpu),
5115                                 GFP_KERNEL | __GFP_NORETRY, 0);
5116         if (!page)
5117                 return ERR_PTR(-ENOMEM);
5118
5119         bpage = page_address(page);
5120
5121  out:
5122         rb_init_page(bpage);
5123
5124         return bpage;
5125 }
5126 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5127
5128 /**
5129  * ring_buffer_free_read_page - free an allocated read page
5130  * @buffer: the buffer the page was allocated for
5131  * @cpu: the cpu buffer the page came from
5132  * @data: the page to free
5133  *
5134  * Free a page allocated from ring_buffer_alloc_read_page.
5135  */
5136 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
5137 {
5138         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5139         struct buffer_data_page *bpage = data;
5140         struct page *page = virt_to_page(bpage);
5141         unsigned long flags;
5142
5143         /* If the page is still in use someplace else, we can't reuse it */
5144         if (page_ref_count(page) > 1)
5145                 goto out;
5146
5147         local_irq_save(flags);
5148         arch_spin_lock(&cpu_buffer->lock);
5149
5150         if (!cpu_buffer->free_page) {
5151                 cpu_buffer->free_page = bpage;
5152                 bpage = NULL;
5153         }
5154
5155         arch_spin_unlock(&cpu_buffer->lock);
5156         local_irq_restore(flags);
5157
5158  out:
5159         free_page((unsigned long)bpage);
5160 }
5161 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5162
5163 /**
5164  * ring_buffer_read_page - extract a page from the ring buffer
5165  * @buffer: buffer to extract from
5166  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5167  * @len: amount to extract
5168  * @cpu: the cpu of the buffer to extract
5169  * @full: should the extraction only happen when the page is full.
5170  *
5171  * This function will pull out a page from the ring buffer and consume it.
5172  * @data_page must be the address of the variable that was returned
5173  * from ring_buffer_alloc_read_page. This is because the page might be used
5174  * to swap with a page in the ring buffer.
5175  *
5176  * for example:
5177  *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
5178  *      if (IS_ERR(rpage))
5179  *              return PTR_ERR(rpage);
5180  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5181  *      if (ret >= 0)
5182  *              process_page(rpage, ret);
5183  *
5184  * When @full is set, the read will only succeed when the writer
5185  * is off the reader page.
5186  *
5187  * Note: it is up to the calling functions to handle sleeps and wakeups.
5188  *  The ring buffer can be used anywhere in the kernel and can not
5189  *  blindly call wake_up. The layer that uses the ring buffer must be
5190  *  responsible for that.
5191  *
5192  * Returns:
5193  *  >=0 if data has been transferred, returns the offset of consumed data.
5194  *  <0 if no data has been transferred.
5195  */
5196 int ring_buffer_read_page(struct trace_buffer *buffer,
5197                           void **data_page, size_t len, int cpu, int full)
5198 {
5199         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5200         struct ring_buffer_event *event;
5201         struct buffer_data_page *bpage;
5202         struct buffer_page *reader;
5203         unsigned long missed_events;
5204         unsigned long flags;
5205         unsigned int commit;
5206         unsigned int read;
5207         u64 save_timestamp;
5208         int ret = -1;
5209
5210         if (!cpumask_test_cpu(cpu, buffer->cpumask))
5211                 goto out;
5212
5213         /*
5214          * If len is not big enough to hold the page header, then
5215          * we can not copy anything.
5216          */
5217         if (len <= BUF_PAGE_HDR_SIZE)
5218                 goto out;
5219
5220         len -= BUF_PAGE_HDR_SIZE;
5221
5222         if (!data_page)
5223                 goto out;
5224
5225         bpage = *data_page;
5226         if (!bpage)
5227                 goto out;
5228
5229         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5230
5231         reader = rb_get_reader_page(cpu_buffer);
5232         if (!reader)
5233                 goto out_unlock;
5234
5235         event = rb_reader_event(cpu_buffer);
5236
5237         read = reader->read;
5238         commit = rb_page_commit(reader);
5239
5240         /* Check if any events were dropped */
5241         missed_events = cpu_buffer->lost_events;
5242
5243         /*
5244          * If this page has been partially read or
5245          * if len is not big enough to read the rest of the page or
5246          * a writer is still on the page, then
5247          * we must copy the data from the page to the buffer.
5248          * Otherwise, we can simply swap the page with the one passed in.
5249          */
5250         if (read || (len < (commit - read)) ||
5251             cpu_buffer->reader_page == cpu_buffer->commit_page) {
5252                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5253                 unsigned int rpos = read;
5254                 unsigned int pos = 0;
5255                 unsigned int size;
5256
5257                 if (full)
5258                         goto out_unlock;
5259
5260                 if (len > (commit - read))
5261                         len = (commit - read);
5262
5263                 /* Always keep the time extend and data together */
5264                 size = rb_event_ts_length(event);
5265
5266                 if (len < size)
5267                         goto out_unlock;
5268
5269                 /* save the current timestamp, since the user will need it */
5270                 save_timestamp = cpu_buffer->read_stamp;
5271
5272                 /* Need to copy one event at a time */
5273                 do {
5274                         /*
5275                          * We need the size of one event, because
5276                          * rb_advance_reader only advances by one event,
5277                          * whereas rb_event_ts_length may include the size of
5278                          * one or two events.
5279                          * We have already ensured there's enough space if this
                              * is a time extend.
                              */
5280                         size = rb_event_length(event);
5281                         memcpy(bpage->data + pos, rpage->data + rpos, size);
5282
5283                         len -= size;
5284
5285                         rb_advance_reader(cpu_buffer);
5286                         rpos = reader->read;
5287                         pos += size;
5288
5289                         if (rpos >= commit)
5290                                 break;
5291
5292                         event = rb_reader_event(cpu_buffer);
5293                         /* Always keep the time extend and data together */
5294                         size = rb_event_ts_length(event);
5295                 } while (len >= size);
5296
5297                 /* update bpage */
5298                 local_set(&bpage->commit, pos);
5299                 bpage->time_stamp = save_timestamp;
5300
5301                 /* we copied everything to the beginning */
5302                 read = 0;
5303         } else {
5304                 /* update the entry counter */
5305                 cpu_buffer->read += rb_page_entries(reader);
5306                 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
5307
5308                 /* swap the pages */
5309                 rb_init_page(bpage);
5310                 bpage = reader->page;
5311                 reader->page = *data_page;
5312                 local_set(&reader->write, 0);
5313                 local_set(&reader->entries, 0);
5314                 reader->read = 0;
5315                 *data_page = bpage;
5316
5317                 /*
5318                  * Use the real_end for the data size.
5319                  * This gives us a chance to store the lost events
5320                  * on the page.
5321                  */
5322                 if (reader->real_end)
5323                         local_set(&bpage->commit, reader->real_end);
5324         }
5325         ret = read;
5326
5327         cpu_buffer->lost_events = 0;
5328
5329         commit = local_read(&bpage->commit);
5330         /*
5331          * Set a flag in the commit field if we lost events
5332          */
5333         if (missed_events) {
5334                 /*
5335                  * If there is room at the end of the page to save the
5336                  * missed events, then record it there.
                      */
5337                 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
5338                         memcpy(&bpage->data[commit], &missed_events,
5339                                sizeof(missed_events));
5340                         local_add(RB_MISSED_STORED, &bpage->commit);
5341                         commit += sizeof(missed_events);
5342                 }
5343                 local_add(RB_MISSED_EVENTS, &bpage->commit);
5344         }
5345
5346         /*
5347          * This page may be off to user land. Zero it out here.
5348          */
5349         if (commit < BUF_PAGE_SIZE)
5350                 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
5351
5352  out_unlock:
5353         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5354
5355  out:
5356         return ret;
5357 }
5358 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
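
/*
 * A sketch of the full page read cycle, assuming a hypothetical caller
 * that pulls whole pages out of one CPU buffer (roughly how splice
 * style readers use this interface). The page address is passed by
 * reference because ring_buffer_read_page() may swap it with a page
 * from the ring buffer:
 *
 *	void *page;
 *	int ret;
 *
 *	page = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 *	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		example_handle_page(page, ret);
 *
 *	ring_buffer_free_read_page(buffer, cpu, page);
 *	return 0;
 *
 * example_handle_page() is a made-up consumer; ret is the offset where
 * the transferred data begins, as described above.
 */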
5359
5360 /*
5361  * We only allocate new buffers, never free them if the CPU goes down.
5362  * If we were to free the buffer, then the user would lose any trace that was in
5363  * the buffer.
5364  */
5365 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
5366 {
5367         struct trace_buffer *buffer;
5368         long nr_pages_same;
5369         int cpu_i;
5370         unsigned long nr_pages;
5371
5372         buffer = container_of(node, struct trace_buffer, node);
5373         if (cpumask_test_cpu(cpu, buffer->cpumask))
5374                 return 0;
5375
5376         nr_pages = 0;
5377         nr_pages_same = 1;
5378         /* check if all cpu sizes are the same */
5379         for_each_buffer_cpu(buffer, cpu_i) {
5380                 /* fill in the size from first enabled cpu */
5381                 if (nr_pages == 0)
5382                         nr_pages = buffer->buffers[cpu_i]->nr_pages;
5383                 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
5384                         nr_pages_same = 0;
5385                         break;
5386                 }
5387         }
5388         /* allocate minimum pages, user can later expand it */
5389         if (!nr_pages_same)
5390                 nr_pages = 2;
5391         buffer->buffers[cpu] =
5392                 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
5393         if (!buffer->buffers[cpu]) {
5394                 WARN(1, "failed to allocate ring buffer on CPU %u\n",
5395                      cpu);
5396                 return -ENOMEM;
5397         }
5398         smp_wmb();
5399         cpumask_set_cpu(cpu, buffer->cpumask);
5400         return 0;
5401 }
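
/*
 * A sketch of how this hotplug callback is wired up, assuming the usual
 * multi-instance CPU hotplug setup: the state is registered once with
 * this function as the prepare step, and every allocated ring buffer
 * adds its ->node as an instance so a newly onlined CPU gets a per CPU
 * buffer before it can be traced:
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 *	...
 *	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE,
 *				       &buffer->node);
 *
 * The state name string here is an assumption; the enum value and the
 * two cpuhp_*() helpers are the standard hotplug API.
 */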
5402
5403 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
5404 /*
5405  * This is a basic integrity check of the ring buffer.
5406  * Late in the boot cycle this test will run when configured in.
5407  * It will kick off a thread per CPU that will go into a loop
5408  * writing to the per cpu ring buffer various sizes of data.
5409  * Some of the data will be large items, some small.
5410  *
5411  * Another thread is created that goes into a spin, sending out
5412  * IPIs to the other CPUs to also write into the ring buffer.
5413  * This is to test the nesting ability of the buffer.
5414  *
5415  * Basic stats are recorded and reported. If something in the
5416  * ring buffer should happen that's not expected, a big warning
5417  * is displayed and all ring buffers are disabled.
5418  */
5419 static struct task_struct *rb_threads[NR_CPUS] __initdata;
5420
5421 struct rb_test_data {
5422         struct trace_buffer *buffer;
5423         unsigned long           events;
5424         unsigned long           bytes_written;
5425         unsigned long           bytes_alloc;
5426         unsigned long           bytes_dropped;
5427         unsigned long           events_nested;
5428         unsigned long           bytes_written_nested;
5429         unsigned long           bytes_alloc_nested;
5430         unsigned long           bytes_dropped_nested;
5431         int                     min_size_nested;
5432         int                     max_size_nested;
5433         int                     max_size;
5434         int                     min_size;
5435         int                     cpu;
5436         int                     cnt;
5437 };
5438
5439 static struct rb_test_data rb_data[NR_CPUS] __initdata;
5440
5441 /* 1 meg per cpu */
5442 #define RB_TEST_BUFFER_SIZE     1048576
5443
5444 static char rb_string[] __initdata =
5445         "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
5446         "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
5447         "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
5448
5449 static bool rb_test_started __initdata;
5450
5451 struct rb_item {
5452         int size;
5453         char str[];
5454 };
5455
5456 static __init int rb_write_something(struct rb_test_data *data, bool nested)
5457 {
5458         struct ring_buffer_event *event;
5459         struct rb_item *item;
5460         bool started;
5461         int event_len;
5462         int size;
5463         int len;
5464         int cnt;
5465
5466         /* Have nested writes different than what is written */
5467         cnt = data->cnt + (nested ? 27 : 0);
5468
5469         /* Multiply cnt by ~e, to make some unique increment */
5470         size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
5471
5472         len = size + sizeof(struct rb_item);
5473
5474         started = rb_test_started;
5475         /* read rb_test_started before checking buffer enabled */
5476         smp_rmb();
5477
5478         event = ring_buffer_lock_reserve(data->buffer, len);
5479         if (!event) {
5480                 /* Ignore dropped events before test starts. */
5481                 if (started) {
5482                         if (nested)
5483                                 data->bytes_dropped_nested += len;
5484                         else
5485                                 data->bytes_dropped += len;
5486                 }
5487                 return len;
5488         }
5489
5490         event_len = ring_buffer_event_length(event);
5491
5492         if (RB_WARN_ON(data->buffer, event_len < len))
5493                 goto out;
5494
5495         item = ring_buffer_event_data(event);
5496         item->size = size;
5497         memcpy(item->str, rb_string, size);
5498
5499         if (nested) {
5500                 data->bytes_alloc_nested += event_len;
5501                 data->bytes_written_nested += len;
5502                 data->events_nested++;
5503                 if (!data->min_size_nested || len < data->min_size_nested)
5504                         data->min_size_nested = len;
5505                 if (len > data->max_size_nested)
5506                         data->max_size_nested = len;
5507         } else {
5508                 data->bytes_alloc += event_len;
5509                 data->bytes_written += len;
5510                 data->events++;
5511                 if (!data->min_size || len < data->min_size)
5512                         data->min_size = len;
5513                 if (len > data->max_size)
5514                         data->max_size = len;
5515         }
5516
5517  out:
5518         ring_buffer_unlock_commit(data->buffer, event);
5519
5520         return 0;
5521 }
5522
5523 static __init int rb_test(void *arg)
5524 {
5525         struct rb_test_data *data = arg;
5526
5527         while (!kthread_should_stop()) {
5528                 rb_write_something(data, false);
5529                 data->cnt++;
5530
5531                 set_current_state(TASK_INTERRUPTIBLE);
5532                 /* Now sleep between a min of 100-300us and a max of 1ms */
5533                 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
5534         }
5535
5536         return 0;
5537 }
5538
5539 static __init void rb_ipi(void *ignore)
5540 {
5541         struct rb_test_data *data;
5542         int cpu = smp_processor_id();
5543
5544         data = &rb_data[cpu];
5545         rb_write_something(data, true);
5546 }
5547
5548 static __init int rb_hammer_test(void *arg)
5549 {
5550         while (!kthread_should_stop()) {
5551
5552                 /* Send an IPI to all cpus to write data! */
5553                 smp_call_function(rb_ipi, NULL, 1);
5554                 /* No sleep, but for non preempt, let others run */
5555                 schedule();
5556         }
5557
5558         return 0;
5559 }
5560
5561 static __init int test_ringbuffer(void)
5562 {
5563         struct task_struct *rb_hammer;
5564         struct trace_buffer *buffer;
5565         int cpu;
5566         int ret = 0;
5567
5568         if (security_locked_down(LOCKDOWN_TRACEFS)) {
5569                 pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
5570                 return 0;
5571         }
5572
5573         pr_info("Running ring buffer tests...\n");
5574
5575         buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
5576         if (WARN_ON(!buffer))
5577                 return 0;
5578
5579         /* Disable buffer so that threads can't write to it yet */
5580         ring_buffer_record_off(buffer);
5581
5582         for_each_online_cpu(cpu) {
5583                 rb_data[cpu].buffer = buffer;
5584                 rb_data[cpu].cpu = cpu;
5585                 rb_data[cpu].cnt = cpu;
5586                 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
5587                                                  "rbtester/%d", cpu);
5588                 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
5589                         pr_cont("FAILED\n");
5590                         ret = PTR_ERR(rb_threads[cpu]);
5591                         goto out_free;
5592                 }
5593
5594                 kthread_bind(rb_threads[cpu], cpu);
5595                 wake_up_process(rb_threads[cpu]);
5596         }
5597
5598         /* Now create the rb hammer! */
5599         rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
5600         if (WARN_ON(IS_ERR(rb_hammer))) {
5601                 pr_cont("FAILED\n");
5602                 ret = PTR_ERR(rb_hammer);
5603                 goto out_free;
5604         }
5605
5606         ring_buffer_record_on(buffer);
5607         /*
5608          * Show buffer is enabled before setting rb_test_started.
5609          * Yes there's a small race window where events could be
5610          * dropped and the thread won't catch it. But when a ring
5611          * buffer gets enabled, there will always be some kind of
5612          * delay before other CPUs see it. Thus, we don't care about
5613          * those dropped events. We care about events dropped after
5614          * the threads see that the buffer is active.
5615          */
5616         smp_wmb();
5617         rb_test_started = true;
5618
5619         set_current_state(TASK_INTERRUPTIBLE);
5620         /* Just run for 10 seconds */
5621         schedule_timeout(10 * HZ);
5622
5623         kthread_stop(rb_hammer);
5624
5625  out_free:
5626         for_each_online_cpu(cpu) {
5627                 if (!rb_threads[cpu])
5628                         break;
5629                 kthread_stop(rb_threads[cpu]);
5630         }
5631         if (ret) {
5632                 ring_buffer_free(buffer);
5633                 return ret;
5634         }
5635
5636         /* Report! */
5637         pr_info("finished\n");
5638         for_each_online_cpu(cpu) {
5639                 struct ring_buffer_event *event;
5640                 struct rb_test_data *data = &rb_data[cpu];
5641                 struct rb_item *item;
5642                 unsigned long total_events;
5643                 unsigned long total_dropped;
5644                 unsigned long total_written;
5645                 unsigned long total_alloc;
5646                 unsigned long total_read = 0;
5647                 unsigned long total_size = 0;
5648                 unsigned long total_len = 0;
5649                 unsigned long total_lost = 0;
5650                 unsigned long lost;
5651                 int big_event_size;
5652                 int small_event_size;
5653
5654                 ret = -1;
5655
5656                 total_events = data->events + data->events_nested;
5657                 total_written = data->bytes_written + data->bytes_written_nested;
5658                 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
5659                 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
5660
5661                 big_event_size = data->max_size + data->max_size_nested;
5662                 small_event_size = data->min_size + data->min_size_nested;
5663
5664                 pr_info("CPU %d:\n", cpu);
5665                 pr_info("              events:    %ld\n", total_events);
5666                 pr_info("       dropped bytes:    %ld\n", total_dropped);
5667                 pr_info("       alloced bytes:    %ld\n", total_alloc);
5668                 pr_info("       written bytes:    %ld\n", total_written);
5669                 pr_info("       biggest event:    %d\n", big_event_size);
5670                 pr_info("      smallest event:    %d\n", small_event_size);
5671
5672                 if (RB_WARN_ON(buffer, total_dropped))
5673                         break;
5674
5675                 ret = 0;
5676
5677                 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
5678                         total_lost += lost;
5679                         item = ring_buffer_event_data(event);
5680                         total_len += ring_buffer_event_length(event);
5681                         total_size += item->size + sizeof(struct rb_item);
5682                         if (memcmp(&item->str[0], rb_string, item->size) != 0) {
5683                                 pr_info("FAILED!\n");
5684                                 pr_info("buffer had: %.*s\n", item->size, item->str);
5685                                 pr_info("expected:   %.*s\n", item->size, rb_string);
5686                                 RB_WARN_ON(buffer, 1);
5687                                 ret = -1;
5688                                 break;
5689                         }
5690                         total_read++;
5691                 }
5692                 if (ret)
5693                         break;
5694
5695                 ret = -1;
5696
5697                 pr_info("         read events:   %ld\n", total_read);
5698                 pr_info("         lost events:   %ld\n", total_lost);
5699                 pr_info("        total events:   %ld\n", total_lost + total_read);
5700                 pr_info("  recorded len bytes:   %ld\n", total_len);
5701                 pr_info(" recorded size bytes:   %ld\n", total_size);
5702                 if (total_lost)
5703                         pr_info(" With dropped events, record len and size may not match\n"
5704                                 " alloced and written from above\n");
5705                 if (!total_lost) {
5706                         if (RB_WARN_ON(buffer, total_len != total_alloc ||
5707                                        total_size != total_written))
5708                                 break;
5709                 }
5710                 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
5711                         break;
5712
5713                 ret = 0;
5714         }
5715         if (!ret)
5716                 pr_info("Ring buffer PASSED!\n");
5717
5718         ring_buffer_free(buffer);
5719         return 0;
5720 }
5721
5722 late_initcall(test_ringbuffer);
5723 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */