// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount -mfentry
 * flags (since gcc 9 / clang 10) is used.
 * In both cases the original as well as the disabled function prologue
 * contain only a single six byte instruction and look like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the instruction gets patched and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction is patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump directly to the ftrace caller, but need a
 * trampoline (ftrace_plt), which clobbers r1 as well.
 */

void *ftrace_func __read_mostly = ftrace_stub;

54 "ftrace_shared_hotpatch_trampoline_br:\n"
55 " lmg %r0,%r1,2(%r1)\n"
57 "ftrace_shared_hotpatch_trampoline_br_end:\n"
60 #ifdef CONFIG_EXPOLINE
63 "ftrace_shared_hotpatch_trampoline_ex:\n"
64 " lmg %r0,%r1,2(%r1)\n"
65 " ex %r0," __stringify(__LC_BR_R1) "(%r0)\n"
67 "ftrace_shared_hotpatch_trampoline_ex_end:\n"
72 "ftrace_shared_hotpatch_trampoline_exrl:\n"
73 " lmg %r0,%r1,2(%r1)\n"
74 " .insn ril,0xc60000000000,%r0,0f\n" /* exrl */
77 "ftrace_shared_hotpatch_trampoline_exrl_end:\n"
79 #endif /* CONFIG_EXPOLINE */
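/*
 * Sketch of the control flow through the shared trampolines above (the
 * exact struct ftrace_hotpatch_trampoline layout is defined in the local
 * ftrace.h; see ftrace_init_nop() below):
 *
 *   <traced function>:	brcl 15,<trampoline>	# prologue, once enabled
 *   <trampoline>:	brasl %r1,<shared code>	# %r1 -> trampoline data
 *			.quad <return address into traced function>
 *			.quad <ftrace_caller>
 *
 * The shared code loads both quadwords with "lmg %r0,%r1,2(%r1)", so %r0
 * ends up with the return address and %r1 with ftrace_caller, and then
 * branches to %r1 - directly via br, or via an ex/exrl of the "br %r1"
 * pattern when expolines are required.
 */
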
#ifdef CONFIG_MODULES
static char *ftrace_plt;

asm(
	"	.data\n"
	"ftrace_plt_template:\n"
	"	basr	%r1,%r0\n"
	"	lg	%r1,0f-.(%r1)\n"
	"	br	%r1\n"
	"0:	.quad	ftrace_caller\n"
	"ftrace_plt_template_end:\n"
	"	.previous\n"
);
#endif /* CONFIG_MODULES */

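/*
 * Note: the template above is only used as a fallback when no shared
 * hotpatch trampoline is available (see ftrace_plt_init() below). basr/lg
 * fetch the address of ftrace_caller from the literal at label 0 and branch
 * to it, which is why module code going through ftrace_plt also clobbers
 * %r1, as stated in the header comment.
 */
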
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_ex;
		tend = ftrace_shared_hotpatch_trampoline_ex_end;
		if (test_facility(35)) { /* exrl */
			tstart = ftrace_shared_hotpatch_trampoline_exrl;
			tend = ftrace_shared_hotpatch_trampoline_exrl_end;
		}
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

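/*
 * The selection above prefers the fastest expoline-safe variant: with
 * spectre mitigations active the indirect "br %r1" is replaced by an
 * execute-type thunk, using exrl if facility 35 (execute-extensions) is
 * installed and falling back to ex via the lowcore __LC_BR_R1 slot
 * otherwise.
 */
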
bool ftrace_need_init_nop(void)
{
	return ftrace_shared_hotpatch_trampoline(NULL);
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

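/*
 * Worked example for the displacement arithmetic above, with hypothetical
 * addresses: for a traced function at 0x1000 and its trampoline at 0x3000,
 * the halfword displacement written into the prologue brcl is
 * (0x3000 - 0x1000) / 2 = 0x1000, and rest_of_intercepted_function is
 * 0x1000 + 6, i.e. the first instruction after the six byte prologue.
 */
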
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

static void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
}

static void ftrace_generate_call_insn(struct ftrace_insn *insn,
				      unsigned long ip)
{
	unsigned long target;

	/* brasl r0,ftrace_caller */
	target = FTRACE_ADDR;
#ifdef CONFIG_MODULES
	if (is_module_addr((void *)ip))
		target = (unsigned long)ftrace_plt;
#endif /* CONFIG_MODULES */
	insn->opc = 0xc005;
	insn->disp = (target - ip) / 2;
}

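/*
 * Example with hypothetical addresses: for ip = 0x2000 and a target of
 * 0x8000 this generates c0 05 00 00 30 00, i.e. "brasl %r0,.+0x6000".
 * Since the s32 displacement counts halfwords, such branches reach
 * +-4 GiB around ip.
 */
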
static void brcl_disable(void *brcl)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

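/*
 * Only byte 1 of the six byte brcl is rewritten here: its high nibble holds
 * the condition mask, so 0x04 (mask 0) never branches and acts as a nop,
 * while 0xf4 (mask 15, see brcl_enable() below) always branches. A single
 * byte store replaces the instruction in one piece, so no intermediate,
 * half-patched instruction can ever be observed.
 */
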
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		brcl_disable((void *)rec->ip);
		return 0;
	}

	if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace ftrace call with a nop. */
	ftrace_generate_call_insn(&orig, rec->ip);
	ftrace_generate_nop_insn(&new);

	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *)rec->ip, &new, sizeof(new));
	return 0;
}

static void brcl_enable(void *brcl)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		brcl_enable((void *)rec->ip);
		return 0;
	}

	if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace nop with an ftrace call. */
	ftrace_generate_nop_insn(&orig);
	ftrace_generate_call_insn(&new, rec->ip);

	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *)rec->ip, &new, sizeof(new));
	return 0;
}

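/*
 * Both ftrace_make_nop() and ftrace_make_call() follow the same pattern on
 * the non-trampoline path: read the current instruction with
 * copy_from_kernel_nofault(), verify it against the expected old
 * instruction, and only then install the replacement with
 * s390_kernel_write(), so unexpected code is reported via -EINVAL rather
 * than silently overwritten.
 */
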
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

void arch_ftrace_update_code(int command)
{
	if (ftrace_shared_hotpatch_trampoline(NULL))
		ftrace_modify_all_code(command);
	else
		ftrace_run_stop_machine(command);
}

static void __ftrace_sync(void *dummy)
{
}

int ftrace_arch_code_modify_post_process(void)
{
	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		/* Send SIGP to the other CPUs, so they see the new code. */
		smp_call_function(__ftrace_sync, NULL, 1);
	}
	return 0;
}

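/*
 * The interrupt delivered by smp_call_function() and the subsequent
 * interrupt return on each remote CPU serve as a serialization point, so
 * no CPU keeps executing stale prefetched copies of the just patched
 * instructions; the empty __ftrace_sync() callback exists only for this
 * side effect.
 */
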
#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	if (!start) {
		start = ftrace_plt_template;
		end = ftrace_plt_template_end;
	}
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

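/*
 * ftrace_plt is allocated with module_alloc() so that it resides in the
 * module address space; the six byte branch patched into a module's
 * function prologue must be able to reach it with its signed 32-bit
 * halfword displacement.
 */
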
#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * of the current task.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *)sp))
		ra = (unsigned long)return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

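/*
 * Calling convention sketch, assuming the ftrace_caller entry code: ra is
 * the traced function's original return address, sp its stack pointer
 * (used to pair entry and exit events), and ip points right behind the
 * patched prologue instruction, hence the MCOUNT_INSN_SIZE adjustment.
 * If function_graph_enter() accepts the frame, the traced function will
 * return into return_to_handler instead of ra.
 */
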
/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition. To enable the
 * ftrace graph code block, the mask field of the instruction is patched to
 * zero, which turns the instruction into a nop.
 * To disable the ftrace graph code the mask field is patched to all ones,
 * which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	brcl_disable(ftrace_graph_caller);
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	brcl_enable(ftrace_graph_caller);
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
		struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	preempt_disable_notrace();
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);
		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

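/*
 * Execution model of the handler above: the probe sits on the six byte
 * ftrace prologue instruction, so no instruction has to be single-stepped.
 * The saved PSW address is pointed at the probe for the pre handler and,
 * unless the pre handler changed it, moved past the prologue
 * (ip + MCOUNT_INSN_SIZE) before the post handler runs. This is also why
 * arch_prepare_kprobe_ftrace() below needs no instruction slot.
 */
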
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */