/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)                    \
        do {                                    \
                if (WARN_ON(cond))              \
                        ftrace_kill();          \
        } while (0)

#define FTRACE_WARN_ON_ONCE(cond)               \
        do {                                    \
                if (WARN_ON_ONCE(cond))         \
                        ftrace_kill();          \
        } while (0)
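
/*
 * These wrappers turn any anomaly detected at runtime into a permanent
 * shutdown of ftrace: the WARN fires (once, for the _ONCE variant) and
 * ftrace_kill() disables all tracing.  For example, the code-patching
 * paths below call FTRACE_WARN_ON_ONCE(1) whenever ftrace_modify_code()
 * reports an unexpected fault.
 */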

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag between this call and the moment
 * the last call site actually stops calling into the tracer.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
#else
                if (ops->next == &ftrace_list_end)
                        __ftrace_trace_function = ops->func;
                else
                        __ftrace_trace_function = ftrace_list_func;
                ftrace_trace_function = ftrace_test_stop_func;
#endif
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
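
/*
 * Note: the smp_wmb() above pairs with the read_barrier_depends()
 * calls in ftrace_list_func(): a reader that observes the new
 * ftrace_list head is guaranteed to also observe its initialized
 * ->next pointer, so the list walk can never fall off the chain.
 */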

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code while we
 * are parsing the objcopy output of the text section. Use a
 * variable for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
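
/*
 * For a rough sense of scale: with 4 KiB pages and a hypothetical
 * 16-byte struct dyn_ftrace, one ftrace_page holds on the order of
 * 250 records.  The real count depends on the arch's pointer size
 * and the layout of struct dyn_ftrace.
 */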

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page      *ftrace_pages_start;
static struct ftrace_page      *ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
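
/*
 * Freed records form a singly linked free list threaded through the
 * (now meaningless) ->ip field: ftrace_free_rec() pushes a record,
 * and ftrace_alloc_dyn_node() below pops one and overwrites ->ip for
 * reuse.  FTRACE_FL_FREE marks a record as sitting on that list.
 */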

void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
        int i;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        if ((rec->ip >= s) && (rec->ip < e))
                                ftrace_free_rec(rec);
                }
        }
        spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        FTRACE_WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next) {
                        /* allocate another page */
                        ftrace_pages->next =
                                (void *)get_zeroed_page(GFP_KERNEL);
                        if (!ftrace_pages->next)
                                return NULL;
                }
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (!ftrace_enabled || ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;

        list_add(&rec->list, &ftrace_new_addrs);

        return rec;
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}
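
/*
 * The filtered-enable branch above reduces to this truth table
 * (FILTER / NOTRACE / ENABLED bits -> action taken on the site):
 *
 *      1 0 1  ->  nothing (already enabled)
 *      1 0 0  ->  enable
 *      1 1 0  ->  nothing (notrace wins over filter)
 *      1 1 1  ->  disable
 *      0 1 0  ->  nothing
 *      0 1 1  ->  disable
 *      0 0 0  ->  nothing
 *      0 0 1  ->  disable
 */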

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int ret;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, mcount_addr);

        ret = ftrace_modify_code(ip, call, nop);
        if (ret) {
                switch (ret) {
                case -EFAULT:
                        FTRACE_WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on modifying ");
                        print_ip_sym(ip);
                        break;
                case -EINVAL:
                        FTRACE_WARN_ON_ONCE(1);
                        pr_info("ftrace failed to modify ");
                        print_ip_sym(ip);
                        print_ip_ins(" expected: ", call);
                        print_ip_ins(" actual: ", (unsigned char *)ip);
                        print_ip_ins(" replace: ", nop);
                        printk(KERN_CONT "\n");
                        break;
                case -EPERM:
                        FTRACE_WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on writing ");
                        print_ip_sym(ip);
                        break;
                default:
                        FTRACE_WARN_ON_ONCE(1);
                        pr_info("ftrace faulted on unknown error ");
                        print_ip_sym(ip);
                }

                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_modify_code(void *data)
{
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}
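
/*
 * stop_machine() runs __ftrace_modify_code() while every other CPU
 * spins with interrupts disabled, so the kernel text can be patched
 * without any CPU executing the instructions as they are rewritten.
 */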

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up++;
        if (ftrace_start_up == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up--;
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
        struct dyn_ftrace *p, *t;
        cycle_t start, stop;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
                        return -1;

                list_del_init(&p->list);

                /* convert record (i.e., patch the mcount call with a NOP) */
                if (ftrace_code_disable(p)) {
                        p->flags |= FTRACE_FL_CONVERTED;
                        ftrace_update_cnt++;
                } else
                        ftrace_free_rec(p);
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        return 0;
}
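
/*
 * Life cycle of a call site: ftrace_record_ip() queues it on
 * ftrace_new_addrs, ftrace_update_code() NOPs it out and marks it
 * CONVERTED, and ftrace_replace_code() later flips it between the
 * NOP and a call to ftrace_caller as tracing is turned on and off.
 */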

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld entries in %d pages\n",
                num_to_init, cnt);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}
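
/*
 * Wildcard semantics of the parser above, by example:
 *
 *      "sys_open"      MATCH_FULL          exact symbol name
 *      "sys_*"         MATCH_FRONT_ONLY    prefix match
 *      "*_open"        MATCH_END_ONLY      suffix match
 *      "*open*"        MATCH_MIDDLE_ONLY   substring match
 *
 * A leading '*' selects END_ONLY; a later '*' upgrades that to
 * MIDDLE_ONLY.  Parsing stops at the first '*' that is not at the
 * start of the pattern.
 */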

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
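
/*
 * Each whitespace-separated word written to set_ftrace_filter or
 * set_ftrace_notrace is handed to ftrace_match() as one pattern, so
 * e.g. `echo 'sys_read sys_write' > set_ftrace_filter` installs two
 * filters.  A word cut short by a partial write is carried over via
 * FTRACE_ITER_CONT and finished by the next write (or at release).
 */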

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}
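
/*
 * Usage sketch (not taken from a caller in this file): a tracer that
 * only wants scheduler functions could do
 *
 *      char buf[] = "sched_*";
 *      ftrace_set_filter(buf, strlen(buf), 1);
 *
 * Note the buffer must be writable, since ftrace_match() truncates
 * the pattern in place at the wildcard.
 */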

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftrace_start_lock);
        if (iter->filtered && ftrace_start_up && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftrace_start_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        return 0;
}

fs_initcall(ftrace_init_debugfs);

static int ftrace_convert_nops(unsigned long *start,
                               unsigned long *end)
{
        unsigned long *p;
        unsigned long addr;
        unsigned long flags;

        mutex_lock(&ftrace_start_lock);
        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
                ftrace_record_ip(addr);
        }

        /* disable interrupts to prevent stop_machine() */
        local_irq_save(flags);
        ftrace_update_code();
        local_irq_restore(flags);
        mutex_unlock(&ftrace_start_lock);

        return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
        unsigned long count, addr, flags;
        int ret;

        /* Keep the ftrace pointer to the stub */
        addr = (unsigned long)ftrace_stub;

        local_irq_save(flags);
        ftrace_dyn_arch_init(&addr);
        local_irq_restore(flags);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                goto failed;

        count = __stop_mcount_loc - __start_mcount_loc;

        ret = ftrace_dyn_table_alloc(count);
        if (ret)
                goto failed;

        last_ftrace_enabled = ftrace_enabled = 1;

        ret = ftrace_convert_nops(__start_mcount_loc,
                                  __stop_mcount_loc);

        return;
 failed:
        ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
        ftrace_enabled = 1;
        return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is disabled immediately,
 * without any synchronization against concurrent users.
 */
void ftrace_kill(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
        clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
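
/*
 * Minimal usage sketch (my_func and my_ops are illustrative names,
 * not defined elsewhere in this file):
 *
 *      static void notrace my_func(unsigned long ip,
 *                                  unsigned long parent_ip)
 *      {
 *              ... called on every traced function entry ...
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *
 * Pair with unregister_ftrace_function(&my_ops) to stop tracing.
 */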

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}