kernel/trace/trace_preemptirq.c
// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

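/*
 * Defining CREATE_TRACE_POINTS before including the trace header below makes
 * this translation unit emit the actual tracepoint definitions for the
 * preempt/irq events, rather than only their declarations.
 */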
#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
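
/*
 * tracing_irq_cpu is set to 1 by the first trace_hardirqs_off*() call seen on
 * a CPU and cleared again by the matching trace_hardirqs_on*() call, so nested
 * or repeated notifications emit only one irq_disable/irq_enable event pair
 * per actual off/on transition.
 */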

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into an RCU on and an RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
        if (this_cpu_read(tracing_irq_cpu)) {
                if (!in_nmi())
                        trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
                tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
                this_cpu_write(tracing_irq_cpu, 0);
        }
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
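
/*
 * A rough sketch (not part of this file) of how a generic exit-to-user path
 * is expected to order these calls, based on kernel/entry/common.c:
 *
 *	trace_hardirqs_on_prepare();
 *	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 *	...			// RCU may stop watching after this point
 *	lockdep_hardirqs_on(CALLER_ADDR0);
 *
 * i.e. the tracepoint and the irqsoff tracer run while RCU is still watching,
 * and the final lockdep state change is done last.
 */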

void trace_hardirqs_on(void)
{
        if (this_cpu_read(tracing_irq_cpu)) {
                if (!in_nmi())
                        trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
                tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
                this_cpu_write(tracing_irq_cpu, 0);
        }

        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);
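
/*
 * Note: trace_hardirqs_on() above uses the _rcuidle tracepoint variant
 * because it can be reached from contexts where RCU is not watching (e.g.
 * the idle path); the in_nmi() check skips the tracepoint in NMI context,
 * where the rcuidle machinery cannot be used safely.
 */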

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into an RCU on and an RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
        if (!this_cpu_read(tracing_irq_cpu)) {
                this_cpu_write(tracing_irq_cpu, 1);
                tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
                if (!in_nmi())
                        trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
        }
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
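
/*
 * As with the on path, the low level entry code is expected to call
 * lockdep_hardirqs_off() first (while RCU may not yet be watching) and
 * trace_hardirqs_off_finish() afterwards, once instrumentation is safe
 * again; see the enter-from-user handling in kernel/entry/common.c.
 */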

void trace_hardirqs_off(void)
{
        lockdep_hardirqs_off(CALLER_ADDR0);

        if (!this_cpu_read(tracing_irq_cpu)) {
                this_cpu_write(tracing_irq_cpu, 1);
                tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
                if (!in_nmi())
                        trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
        }
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
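
/*
 * The *_caller() variants below take an explicit caller address instead of
 * relying on CALLER_ADDR1; they are intended for callers (typically
 * architecture irqflags or assembly glue) where the immediate return address
 * would not identify the interesting call site.
 */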

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (this_cpu_read(tracing_irq_cpu)) {
                if (!in_nmi())
                        trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
                tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
                this_cpu_write(tracing_irq_cpu, 0);
        }

        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        lockdep_hardirqs_off(CALLER_ADDR0);

        if (!this_cpu_read(tracing_irq_cpu)) {
                this_cpu_write(tracing_irq_cpu, 1);
                tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
                if (!in_nmi())
                        trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
        }
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

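/*
 * trace_preempt_on()/trace_preempt_off() are invoked from the scheduler core
 * when preemption is first disabled and finally re-enabled (see
 * preempt_latency_start()/preempt_latency_stop() in kernel/sched/core.c),
 * again skipping the rcuidle tracepoint in NMI context.
 */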
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (!in_nmi())
                trace_preempt_enable_rcuidle(a0, a1);
        tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (!in_nmi())
                trace_preempt_disable_rcuidle(a0, a1);
        tracer_preempt_off(a0, a1);
}
#endif