// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *              Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include "entry.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount
 * -mfentry flags (since gcc 9 / clang 10) is used.
 * In both cases the original and also the disabled function prologue
 * contains only a single six byte instruction and looks like this:
 * >    brcl    0,0                     # offset 0
 * To enable ftrace the code gets patched as shown below and afterwards
 * looks like this:
 * >    brasl   %r0,ftrace_caller       # offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump directly to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */
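
/*
 * Illustrative sketch (not part of this file): the two helpers used by
 * ftrace_make_nop()/ftrace_make_call() below live in asm/ftrace.h and
 * build the six byte encodings described above roughly like this. The
 * struct layout mirrors struct ftrace_insn; the helper bodies are a
 * simplified reconstruction and are kept inside #if 0 on purpose.
 */
#if 0
struct ftrace_insn {
        u16 opc;
        s32 disp;
} __packed;

static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
        insn->opc = 0xc004;     /* brcl with branch mask 0: never taken, a nop */
        insn->disp = 0;
}

static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
                                             unsigned long ip)
{
        /* Module code branches via the ftrace_plt trampoline set up below. */
        unsigned long target = is_module_addr((void *) ip) ?
                ftrace_plt : FTRACE_ADDR;

        insn->opc = 0xc005;             /* brasl %r0,... */
        insn->disp = (target - ip) / 2; /* RIL displacement, in halfwords */
}
#endif
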
void *ftrace_func __read_mostly = ftrace_stub;
unsigned long ftrace_plt;

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        /* The call target is always ftrace_caller, so there is nothing to do. */
        return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        struct ftrace_insn orig, new, old;

        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace ftrace call with a nop. */
        ftrace_generate_call_insn(&orig, rec->ip);
        ftrace_generate_nop_insn(&new);

        /* Verify that the code to be replaced matches what we expect. */
        if (memcmp(&orig, &old, sizeof(old)))
                return -EINVAL;
        s390_kernel_write((void *) rec->ip, &new, sizeof(new));
        return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        struct ftrace_insn orig, new, old;

        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace nop with an ftrace call. */
        ftrace_generate_nop_insn(&orig);
        ftrace_generate_call_insn(&new, rec->ip);

        /* Verify that the code to be replaced matches what we expect. */
        if (memcmp(&orig, &old, sizeof(old)))
                return -EINVAL;
        s390_kernel_write((void *) rec->ip, &new, sizeof(new));
        return 0;
}
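
/*
 * Note (assumed from arch/s390/mm/maccess.c): s390_kernel_write() is used
 * instead of a plain memcpy() because kernel text is mapped read-only; it
 * stores bypassing DAT and therefore page table write protection, which
 * makes it suitable for patching the six byte instruction in place.
 */
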
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        ftrace_func = func;
        return 0;
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
        unsigned int *ip;

        ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
        if (!ftrace_plt)
                panic("cannot allocate ftrace plt\n");
        ip = (unsigned int *) ftrace_plt;
        ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
        ip[1] = 0x100a0004;
        ip[2] = 0x07f10000;
        ip[3] = FTRACE_ADDR >> 32;
        ip[4] = FTRACE_ADDR & 0xffffffff;
        set_memory_ro(ftrace_plt, 1);
        return 0;
}
device_initcall(ftrace_plt_init);
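
/*
 * For reference, the 20 bytes written above decode to this trampoline
 * (the lg loads the 64 bit literal stored at ftrace_plt + 12):
 *
 * ftrace_plt:
 *      basr    %r1,%r0         # r1 = ftrace_plt + 2
 *      lg      %r1,10(%r1)     # r1 = *(ftrace_plt + 12) = FTRACE_ADDR
 *      br      %r1             # branch to ftrace_caller
 *      .short  0               # padding
 *      .quad   FTRACE_ADDR
 */
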
#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * of the current task.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
                                    unsigned long ip)
{
        if (unlikely(ftrace_graph_is_dead()))
                goto out;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        ip -= MCOUNT_INSN_SIZE;
        if (!function_graph_enter(ra, ip, 0, (void *) sp))
                ra = (unsigned long) return_to_handler;
out:
        return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);
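
/*
 * Flow sketch (generic function graph behaviour, assumed here): the
 * ftrace_graph_caller code block passes the traced function's return
 * address to prepare_ftrace_return(); when function_graph_enter()
 * accepts the frame, the caller gets return_to_handler instead, which
 * later hands control back to the original return address.
 */
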
/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition. To enable the
 * ftrace graph code block, we simply patch the mask field of the
 * instruction to zero and turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
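
/*
 * Encoding sketch (assumed RI-c format of "brc"): the 4 byte instruction
 * is laid out as 0xa7 | mask:4 | 0x4 | 16 bit halfword offset, so the
 * byte at offset +1 is (mask << 4) | 4. Writing 0x04 (mask 0) or 0xf4
 * (mask 0xf) therefore toggles between nop and unconditional branch.
 */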
int ftrace_enable_ftrace_graph_caller(void)
{
        u8 op = 0x04; /* set mask field to zero */

        s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
        return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
        u8 op = 0xf4; /* set mask field to all ones */

        s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
        return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
        struct kprobe_ctlblk *kcb;
        struct pt_regs *regs;
        struct kprobe *p;
        int bit;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        regs = ftrace_get_regs(fregs);
        preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;

        if (kprobe_running()) {
                kprobes_inc_nmissed_count(p);
                goto out;
        }

        __this_cpu_write(current_kprobe, p);

        kcb = get_kprobe_ctlblk();
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

        /* Let the handlers see the probed address as the current psw address. */
        instruction_pointer_set(regs, ip);

        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                /* Continue execution behind the patched instruction. */
                instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

                if (unlikely(p->post_handler)) {
                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                        p->post_handler(p, regs, 0);
                }
        }
        __this_cpu_write(current_kprobe, NULL);
out:
        preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
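
/*
 * Illustrative usage (sketch, not part of this file): with
 * CONFIG_KPROBES_ON_FTRACE a kprobe placed on a function entry is
 * serviced by the handler above instead of a breakpoint instruction.
 * Probe target and handler names below are made-up examples.
 */
#if 0
static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("entered %pS\n", (void *) instruction_pointer(regs));
        return 0;       /* continue execution */
}

static struct kprobe my_probe = {
        .symbol_name = "kernel_clone",  /* hypothetical probe target */
        .pre_handler = my_pre_handler,
};

/* register_kprobe(&my_probe); then arms the probe via ftrace. */
#endif
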
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
        /* The probe is handled via ftrace, no instruction slot is needed. */
        p->ainsn.insn = NULL;
        return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */