// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount
 * -mfentry flags (since gcc 9 / clang 10) is used.
 * In both cases the original and also the disabled function prologue
 * contain only a single six byte instruction and look like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched and afterwards looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the
 * called function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which clobbers also r1.
 */

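/*
 * Encoding reminder: brcl and brasl are six byte RIL instructions starting
 * with 0xc0. The low nibble of the second byte selects the operation
 * (4 = brcl, 5 = brasl), the high nibble holds the condition mask resp.
 * the link register:
 * >	c0 04 00 00 00 00	brcl	0,.		# never taken: a nop
 * >	c0 f4 xx xx xx xx	brcl	15,target	# always taken
 * >	c0 15 xx xx xx xx	brasl	%r1,target	# branch and save
 * The 32 bit displacement counts halfwords relative to the instruction
 * address, hence the divisions by 2 below.
 */
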
void *ftrace_func __read_mostly = ftrace_stub;

struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

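/*
 * The brasl of a per-function trampoline (see ftrace_init_nop() below)
 * leaves %r1 pointing just past itself, at the trampoline's data part.
 * 2(%r1) skips the pad halfword, so the lmg loads the return address
 * into %r0 and the interceptor (ftrace_caller) into %r1, matching the
 * ABI described at the top, before branching to the interceptor.
 */
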
#ifdef CONFIG_EXPOLINE
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_ex:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	ex	%r0," __stringify(__LC_BR_R1) "(%r0)\n"
	"	j	.\n"
	"ftrace_shared_hotpatch_trampoline_ex_end:\n"
);

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	.insn	ril,0xc60000000000,%r0,0f\n" /* exrl */
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

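/*
 * With expolines enabled no indirect br %r1 may appear here. The ex
 * variant instead executes the br %r1 that the lowcore provides at
 * __LC_BR_R1; the exrl variant (facility 35) executes the local copy
 * at label 0. The trailing j . is not reached.
 */
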
#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

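/*
 * Select the shared trampoline flavor matching the spectre mitigation
 * state. The optional *end pointer is only needed by ftrace_plt_init()
 * to know how many bytes to copy.
 */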
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_ex;
		tend = ftrace_shared_hotpatch_trampoline_ex_end;
		if (test_facility(35)) { /* exrl */
			tstart = ftrace_shared_hotpatch_trampoline_exrl;
			tend = ftrace_shared_hotpatch_trampoline_exrl_end;
		}
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

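/*
 * The compiler already generated the six byte nop, but its displacement
 * still has to be initialized to point to the function's trampoline, so
 * ask the ftrace core to call ftrace_init_nop() for every record.
 */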
bool ftrace_need_init_nop(void)
{
	return true;
}

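/*
 * Set up the function's trampoline and point the brcl at rec->ip to it.
 * Trampolines are handed out sequentially from an area reserved at link
 * time for vmlinux and at module load time for modules.
 */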
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif /* CONFIG_MODULES */

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015;	/* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

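/*
 * Enabling and disabling a traced function only rewrites the second byte
 * of the brcl at rec->ip: mask 0 (0x04, never taken, i.e. a nop) versus
 * mask 15 (0xf4, always taken). The branch target installed by
 * ftrace_init_nop() stays in place, which is why the addr arguments from
 * the ftrace core can be ignored below.
 */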
static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
	u16 old;
	u8 op;

	if (get_kernel_nofault(old, addr))
		return -EFAULT;
	if (old != expected)
		return -EINVAL;
	/* set mask field to all ones or zeroes */
	op = enable ? 0xf4 : 0x04;
	s390_kernel_write((char *)addr + 1, &op, sizeof(op));
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Expect brcl 0xf,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	/* Expect brcl 0x0,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
	return 0;
}

#ifdef CONFIG_MODULES

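/*
 * A brasl displacement reaches +-4GB, which the vmlinux copy of the
 * shared trampoline is not guaranteed to be within for module code.
 * Keep a copy in module_alloc() memory so that the per-module
 * trampolines always find a shared trampoline in range.
 */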
static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at ftrace_graph_caller location. The instruction
 * there is branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0xf,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0x0,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
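/*
 * Kprobes on function entry do not use a breakpoint here; the handler is
 * invoked through the ftrace call instead and emulates a regular kprobe:
 * the pre handler sees the probed address as instruction pointer, and
 * afterwards execution continues behind the six byte mcount/fentry
 * instruction as if it had been single-stepped.
 */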
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
		struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		/* Emulate singlestep and recover the instruction pointer */
		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);
		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

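/*
 * No instruction slot is needed: the probe fires from the ftrace call
 * instead of replacing the probed instruction, so there is nothing to
 * single-step out of line.
 */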
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */