// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/pgtable.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"
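
/*
 * Recover the original bytes hidden under a jump-optimized kprobe.  If
 * @addr falls anywhere inside the 5-byte JMP that replaced the probed
 * instruction, the original bytes are rebuilt from op->optinsn.copied_insn
 * into @buf and @buf is returned; otherwise @addr is returned unchanged.
 */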
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is under optimizing */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes may have been
	 * overwritten by the jump destination address and must be recovered
	 * from the op->optinsn.copied_insn buffer.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;	/* the saved original first byte */
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}

	return (unsigned long)buf;
}

static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;

	/* Replace the NOP3 with CLAC */
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;		/* movabs $val, %rdi */
	*addr++ = 0xbf;
#else
	*addr++ = 0xb8;		/* movl $val, %eax */
#endif
	*(unsigned long *)addr = val;
}
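
/*
 * Detour template.  arch_prepare_optimized_kprobe() copies everything
 * between optprobe_template_entry and optprobe_template_end into the
 * out-of-line buffer, then patches the NOP placeholders at the _clac, _val
 * and _call labels with a CLAC, a "load &op into the first argument
 * register" and a relative call to optimized_callback(), respectively.
 */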
103 ".pushsection .rodata\n"
104 "optprobe_template_func:\n"
105 ".global optprobe_template_entry\n"
106 "optprobe_template_entry:\n"
108 /* We don't bother saving the ss register */
111 ".global optprobe_template_clac\n"
112 "optprobe_template_clac:\n"
116 ".global optprobe_template_val\n"
117 "optprobe_template_val:\n"
120 ".global optprobe_template_call\n"
121 "optprobe_template_call:\n"
123 /* Move flags to rsp */
124 " movq 18*8(%rsp), %rdx\n"
125 " movq %rdx, 19*8(%rsp)\n"
127 /* Skip flags entry */
130 #else /* CONFIG_X86_32 */
133 ".global optprobe_template_clac\n"
134 "optprobe_template_clac:\n"
138 ".global optprobe_template_val\n"
139 "optprobe_template_val:\n"
141 ".global optprobe_template_call\n"
142 "optprobe_template_call:\n"
144 /* Move flags into esp */
145 " movl 14*4(%esp), %edx\n"
146 " movl %edx, 15*4(%esp)\n"
148 /* Skip flags entry */
152 ".global optprobe_template_end\n"
153 "optprobe_template_end:\n"
156 void optprobe_template_func(void);
157 STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)
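
/*
 * Layout of an optimized kprobe's out-of-line buffer as built by
 * arch_prepare_optimized_kprobe():
 *
 *   [ template: save regs, CLAC, set arg1 = &op, call optimized_callback,
 *     restore regs ]                                  TMPL_END_IDX bytes
 *   [ copy of the probed instructions ]               op->optinsn.size bytes
 *   [ jmp back to kp->addr + op->optinsn.size ]       JMP32_INSN_SIZE bytes
 */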

/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
		/* Save skipped registers */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->cs |= get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);
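
/*
 * Copy whole instructions from the probe address until at least
 * JMP32_INSN_SIZE bytes are covered, relocating them for execution at
 * @real.  Returns the number of bytes copied, or a negative error if an
 * instruction cannot be boosted or the range is reserved by another
 * text-patching user.
 */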
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is an indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}
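
/*
 * For opcode 0xff the ModRM reg field selects the operation: 4 is "jmp r/m"
 * and 5 is "jmp far m", and masking with 6 matches exactly those two
 * values.  0xea is the legacy direct far (ptr16:32) jump.
 */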

/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}
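
/* Check for indirect jumps, including jumps into the retpoline thunks. */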
static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * A jump to an x86_indirect_thunk_* is treated as an indirect jump.
	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
	 * older gcc may still emit plain indirect jumps, so this check is in
	 * addition to, not instead of, the indirect-jump check above.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}

/* Decode the whole function to ensure no instructions jump into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and registers setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr < (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;

		if (search_exception_tables(addr))
			/*
			 * Since some fixup code will jump into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;
		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/* Another subsystem puts a breakpoint */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Make sure no instruction jumps into the target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}
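
/*
 * Note on the range check above: once the probe is optimized, the bytes at
 * paddr + 1 .. paddr + 4 hold the JMP displacement, so any branch landing
 * inside that range would execute garbage.
 */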

/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;
	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);

		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy replacing target instructions.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify if the address gap is within the 2GB range, because this uses
	 * a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy arch-dep-instance from template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set a returning jmp instruction at the tail of the out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/*
	 * Note: len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is also
	 * used in __arch_remove_optimized_kprobe().
	 */

	/* We have to use text_poke() for the instruction buffer because it is RO */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);

	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must call with locking kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}
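
/*
 * The patched probe site is now a single 5-byte instruction:
 *
 *   e9 xx xx xx xx	jmp <op->optinsn.insn>	; rel32 = slot - (addr + 5)
 *
 * whose four displacement bytes overwrite the instruction bytes saved in
 * op->optinsn.copied_insn above.
 */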

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

	text_poke(addr, new, INT3_INSN_SIZE);
	text_poke_sync();
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	text_poke_sync();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}
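
/*
 * The order above matters: the INT3 byte is written and synced to all CPUs
 * first, which makes the following four bytes unreachable; only then are
 * they restored to the original instruction bytes.
 */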

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must call with locking kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}
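
/*
 * Called on an INT3 hit for an optimized kprobe: instead of single-stepping
 * the original instruction, redirect execution into the out-of-line buffer
 * right past the template, i.e. at the copied instructions.
 */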
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);