// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)regs->sp)
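
/*
 * Editorial note (illustrative, not in the original source): at an int3 hit,
 * regs->sp is the interrupted context's stack pointer, so at a function's
 * first instruction *stack_addr(regs) is the return address pushed by the
 * caller -- which is exactly what arch_prepare_kretprobe() below relies on.
 */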

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jumps, opcode extension
 * groups, and some special opcodes cannot be boosted.
 * This is non-const and volatile to keep gcc from statically
 * optimizing it out, as variable_test_bit makes gcc think only
 * *(unsigned long *) is used.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
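
/*
 * Illustrative sketch (not part of the original file, compiled out): how the
 * bitmap above is consumed.  Bit N answers "is the two-byte opcode 0x0f N
 * boostable?"; e.g. 0x0f 0x05 (syscall) maps to bit 0x05, which the
 * W(0x00, ...) row sets to 0, i.e. not boostable.
 */
#if 0
static int example_twobyte_boostable(u8 second_byte)
{
	return test_bit(second_byte, (unsigned long *)twobyte_is_boostable);
}
#endif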

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only current task, but
			      doesn't switch kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Insert a call instruction at address 'from', which calls address 'to'. */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
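
/*
 * Worked example (illustrative, not in the original source): the relative
 * operand is computed against the end of the 5-byte instruction, so
 * synthesize_reljump(buf, buf, buf + 0x10) stores JMP32_INSN_OPCODE (0xe9)
 * followed by raddr = 0x10 - 5 = 0x0b, i.e. the byte sequence
 * e9 0b 00 00 00 ("jmp .+0x10").
 */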

/*
 * Returns non-zero if INSN is boostable.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	/* Can't boost an address-size override prefix */
	if (unlikely(inat_is_address_size_prefix(insn->attr)))
		return 0;

	opcode = insn->opcode.bytes[0];

	switch (opcode & 0xf0) {
	case 0x60:
		/* can't boost "bound" */
		return (opcode != 0x62);
	case 0x70:
		return 0;	/* can't boost conditional jump */
	case 0x90:
		return opcode != 0x9a;	/* can't boost call far */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		/* clear and set flags are boostable */
		if (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe))
			return 1;
		/* indirect jmp is boostable */
		if (opcode == 0xff)
			return X86_MODRM_REG(insn->modrm.bytes[0]) == 4;
		return 0;
	default:
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by Kprobe
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks the displacement
	 * of that instruction.  In that case, we can't recover the
	 * instruction from kp->ainsn.insn.
	 *
	 * On the other hand, in the case of a normal Kprobe, kp->opcode has
	 * a copy of the first byte of the probed instruction, which is
	 * overwritten by int3.  Since the instruction at kp->addr is not
	 * modified by kprobes except for the first byte, we can recover the
	 * original instruction from it and kp->opcode.
	 *
	 * In the case of Kprobes using ftrace, we do not have a copy of
	 * the original instruction.  In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent
	 * state.  Fortunately, we know that the original code there is the
	 * ideal 5-byte long NOP.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * Callers must hold kprobe_mutex, or disable preemption, to prevent
 * the kprobes they reference from being released.
 * Returns zero if the instruction cannot be recovered (or access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		int ret;

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump.  Since a relative jump is a legitimate
		 * instruction on its own, we simply continue if no kprobe
		 * is registered there.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;

		ret = insn_decode(&insn, (void *)__addr, MAX_INSN_SIZE, INSN_MODE_KERN);
		if (ret < 0)
			return 0;

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}

/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode.  Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted by @real, not @dest.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src);
	int ret;

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if the given address is not recovered */
	if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
				     MAX_INSN_SIZE))
		return 0;

	ret = insn_decode(insn, dest, MAX_INSN_SIZE, INSN_MODE_KERN);
	if (ret < 0)
		return 0;

	/* We cannot probe an instruction carrying a forced-emulation prefix */
	if (insn_has_emulate_prefix(insn))
		return 0;

	/* Another subsystem has put a breakpoint there; recovery failed */
	if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
		return 0;

	/* We should not single-step exception-masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP-relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
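
/*
 * Worked example for the displacement fixup above (illustrative, not in the
 * original source): an instruction at src = 0xffffffff81000100 with
 * disp = 0x200 targets src + insn_len + 0x200.  If its copy will run at
 * real = 0xffffffffa0000100, then newdisp = src + 0x200 - real keeps the
 * copy pointing at the same absolute address, because the CPU adds the
 * displacement to the *new* %rip.
 */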

/* Prepare a reljump or an int3 right after the instruction */
static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
			      struct insn *insn)
{
	int len = insn->length;

	if (!IS_ENABLED(CONFIG_PREEMPTION) &&
	    !p->post_handler && can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
		/*
		 * This instruction can be executed directly because it
		 * jumps back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += JMP32_INSN_SIZE;
		p->ainsn.boostable = 1;
	} else {
		/* Otherwise, put an int3 for trapping the single-step */
		if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
			return -ENOSPC;
		buf[len] = INT3_INSN_OPCODE;
		len += INT3_INSN_SIZE;
	}

	return len;
}
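
/*
 * Illustrative slot layout (not in the original source): for a 4-byte probed
 * instruction, prepare_singlestep() leaves the out-of-line buffer as either
 *
 *	[ insn (4 bytes) ][ e9 rel32 ]	boosted: jumps straight back, or
 *	[ insn (4 bytes) ][ cc ]	int3: traps into kprobe_int3_handler()
 */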

/* Make the page read-only when allocating it */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;

	set_vm_flush_reset_perms(page);
	/*
	 * First make the page read-only, and only then make it executable to
	 * prevent it from being W+X in between.
	 */
	set_memory_ro((unsigned long)page, 1);

	/*
	 * TODO: Once additional kernel code protection mechanisms are set,
	 * ensure that the page was not maliciously altered and is still zeroed.
	 */
	set_memory_x((unsigned long)page, 1);

	return page;
}

/* Recover the page to RW mode before releasing it */
void free_insn_page(void *page)
{
	module_memfree(page);
}

/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */

static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{
	switch (p->ainsn.opcode) {
	case 0xfa:	/* cli */
		regs->flags &= ~(X86_EFLAGS_IF);
		break;
	case 0xfb:	/* sti */
		regs->flags |= X86_EFLAGS_IF;
		break;
	case 0x9c:	/* pushf */
		int3_emulate_push(regs, regs->flags);
		break;
	case 0x9d:	/* popf */
		regs->flags = int3_emulate_pop(regs);
		break;
	}
	regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
}
NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);

static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
{
	int3_emulate_ret(regs);
}
NOKPROBE_SYMBOL(kprobe_emulate_ret);

static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	func += p->ainsn.rel32;
	int3_emulate_call(regs, func);
}
NOKPROBE_SYMBOL(kprobe_emulate_call);

static nokprobe_inline
void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	if (cond)
		ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}

static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
	__kprobe_emulate_jmp(p, regs, true);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);

static const unsigned long jcc_mask[6] = {
	[0] = X86_EFLAGS_OF,
	[1] = X86_EFLAGS_CF,
	[2] = X86_EFLAGS_ZF,
	[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
	[4] = X86_EFLAGS_SF,
	[5] = X86_EFLAGS_PF,
};

static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
	bool invert = p->ainsn.jcc.type & 1;
	bool match;

	if (p->ainsn.jcc.type < 0xc) {
		match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
	} else {
		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
		if (p->ainsn.jcc.type >= 0xe)
			/* jle/jg also take the branch on ZF: OR, not AND */
			match = match || (regs->flags & X86_EFLAGS_ZF);
	}
	__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);
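
/*
 * Worked example (illustrative): "je" (0x74) yields jcc.type = 4, so
 * invert = 0 and jcc_mask[4 >> 1] tests X86_EFLAGS_ZF; "jne" (0x75) uses
 * the same mask with invert = 1, taking the branch when ZF is clear.
 */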

static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
	bool match;

	if (p->ainsn.loop.type != 3) {	/* LOOP* */
		if (p->ainsn.loop.asize == 32)
			match = ((*(u32 *)&regs->cx)--) != 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = ((*(u64 *)&regs->cx)--) != 0;
#endif
		else
			match = ((*(u16 *)&regs->cx)--) != 0;
	} else {			/* JCXZ */
		if (p->ainsn.loop.asize == 32)
			match = *(u32 *)(&regs->cx) == 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = *(u64 *)(&regs->cx) == 0;
#endif
		else
			match = *(u16 *)(&regs->cx) == 0;
	}

	if (p->ainsn.loop.type == 0)	/* LOOPNE */
		match = match && !(regs->flags & X86_EFLAGS_ZF);
	else if (p->ainsn.loop.type == 1)	/* LOOPE */
		match = match && (regs->flags & X86_EFLAGS_ZF);

	__kprobe_emulate_jmp(p, regs, match);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);
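
/*
 * Worked example (illustrative): for "loop" (opcode 0xe2), loop.type = 2, so
 * the emulation above decrements regs->cx at the original instruction's
 * address size and branches while the counter is non-zero -- exactly what
 * the hardware would have done at the probed address.
 */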

static const int addrmode_regoffs[] = {
	offsetof(struct pt_regs, ax),
	offsetof(struct pt_regs, cx),
	offsetof(struct pt_regs, dx),
	offsetof(struct pt_regs, bx),
	offsetof(struct pt_regs, sp),
	offsetof(struct pt_regs, bp),
	offsetof(struct pt_regs, si),
	offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
	offsetof(struct pt_regs, r8),
	offsetof(struct pt_regs, r9),
	offsetof(struct pt_regs, r10),
	offsetof(struct pt_regs, r11),
	offsetof(struct pt_regs, r12),
	offsetof(struct pt_regs, r13),
	offsetof(struct pt_regs, r14),
	offsetof(struct pt_regs, r15),
#endif
};

static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_call(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);

static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);
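
/*
 * Worked example (illustrative): "call *%r8" encodes as 41 ff d0.  The ModRM
 * byte 0xd0 has rm = 0 and mod = 3, and REX.B is set, so prepare_emulation()
 * below stores indirect.reg = 0 + 8 = 8; the emulation above then fetches
 * the target from addrmode_regoffs[8], i.e. offsetof(struct pt_regs, r8).
 */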

static int prepare_emulation(struct kprobe *p, struct insn *insn)
{
	insn_byte_t opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0x9c:		/* pushfl */
	case 0x9d:		/* popf/popfd */
		/*
		 * IF modifiers must be emulated since they may enable
		 * interrupts while int3 single-stepping.
		 */
		p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
		p->ainsn.opcode = opcode;
		break;
	case 0xc2:	/* ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
		p->ainsn.emulate_op = kprobe_emulate_ret;
		break;
	case 0x9a:	/* far call absolute -- segment is not supported */
	case 0xea:	/* far jmp absolute -- segment is not supported */
	case 0xcc:	/* int3 */
	case 0xcf:	/* iret -- in-kernel IRET is not supported */
		return -EOPNOTSUPP;

	case 0xe8:	/* near call relative */
		p->ainsn.emulate_op = kprobe_emulate_call;
		if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0xeb:	/* short jump relative */
	case 0xe9:	/* near jump relative */
		p->ainsn.emulate_op = kprobe_emulate_jmp;
		if (insn->immediate.nbytes == 1)
			p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		else if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0x70 ... 0x7f:
		/* 1-byte conditional jump */
		p->ainsn.emulate_op = kprobe_emulate_jcc;
		p->ainsn.jcc.type = opcode & 0xf;
		p->ainsn.rel32 = *(char *)insn->immediate.bytes;
		break;
	case 0x0f:
		opcode = insn->opcode.bytes[1];
		if ((opcode & 0xf0) == 0x80) {
			/* 2-byte conditional jump */
			p->ainsn.emulate_op = kprobe_emulate_jcc;
			p->ainsn.jcc.type = opcode & 0xf;
			if (insn->immediate.nbytes == 2)
				p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
			else
				p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		} else if (opcode == 0x01 &&
			   X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
			   X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
			/* VM extensions - not supported */
			return -EOPNOTSUPP;
		}
		break;
	case 0xe0:	/* Loop NZ */
	case 0xe1:	/* Loop */
	case 0xe2:	/* Loop */
	case 0xe3:	/* J*CXZ */
		p->ainsn.emulate_op = kprobe_emulate_loop;
		p->ainsn.loop.type = opcode & 0x3;
		p->ainsn.loop.asize = insn->addr_bytes * 8;
		p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		break;
	case 0xff:
		/*
		 * Since 0xff is an extended group opcode, the instruction
		 * is determined by the MOD/RM byte.
		 */
		opcode = insn->modrm.bytes[0];
		if ((opcode & 0x30) == 0x10) {
			if ((opcode & 0x8) == 0x8)
				return -EOPNOTSUPP;	/* far call */
			/* call absolute, indirect */
			p->ainsn.emulate_op = kprobe_emulate_call_indirect;
		} else if ((opcode & 0x30) == 0x20) {
			if ((opcode & 0x8) == 0x8)
				return -EOPNOTSUPP;	/* far jmp */
			/* jmp near absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
		} else
			break;

		if (insn->addr_bytes != sizeof(unsigned long))
			return -EOPNOTSUPP;	/* Don't support different sizes */
		if (X86_MODRM_MOD(opcode) != 3)
			return -EOPNOTSUPP;	/* TODO: support memory addressing */

		p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
#ifdef CONFIG_X86_64
		if (X86_REX_B(insn->rex_prefix.value))
			p->ainsn.indirect.reg += 8;
#endif
		break;
	}
	p->ainsn.size = insn->length;

	return 0;
}

static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int ret, len;

	/* Copy the instruction, recovering it if another optprobe has modified it */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/* Analyze the opcode and set up the emulation functions */
	ret = prepare_emulation(p, &insn);
	if (ret < 0)
		return ret;

	/* Add an int3 for the single-step, or a booster jmp */
	len = prepare_singlestep(buf, p, &insn);
	if (len < 0)
		return len;

	/* Also, the displacement change doesn't affect the first byte */
	p->opcode = buf[0];

	p->ainsn.tp_len = len;
	perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len);

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return 0;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;

	memset(&p->ainsn, 0, sizeof(p->ainsn));

	/* insn: must be on a special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}
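
/*
 * Consumer-side sketch (illustrative, compiled out; the target symbol and
 * handler names are hypothetical): registering a kprobe through the generic
 * API funnels into arch_prepare_kprobe() above, which copies and analyzes
 * the probed instruction.
 */
#if 0
static int example_pre_handler(struct kprobe *kp, struct pt_regs *regs)
{
	pr_info("pre-handler: ip = %lx\n", regs->ip);
	return 0;	/* 0: continue with single-step/boost as usual */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_sys_open",	/* hypothetical target */
	.pre_handler	= example_pre_handler,
};

/* register_kprobe(&example_kp); ... unregister_kprobe(&example_kp); */
#endif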

void arch_arm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	text_poke(p->addr, &int3, 1);
	text_poke_sync();
	perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
	text_poke(p->addr, &p->opcode, 1);
	text_poke_sync();
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* Record the perf event before freeing the slot */
		perf_event_text_poke(p->ainsn.insn, p->ainsn.insn,
				     p->ainsn.tp_len, NULL, 0);
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & X86_EFLAGS_IF);
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;
	ri->fp = sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
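
/*
 * Illustrative before/after (not in the original source), at the probed
 * function's first instruction:
 *
 *	stack before:	[ caller's return address ][ caller frame ... ]
 *	stack after:	[ &kretprobe_trampoline   ][ caller frame ... ]
 *
 * The real return address is kept in ri->ret_addr and is restored when the
 * trampoline below runs the kretprobe handler.
 */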

static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER)
		restore_previous_kprobe(kcb);
	else
		reset_current_kprobe();
}
NOKPROBE_SYMBOL(kprobe_post_process);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPTION)
	if (p->ainsn.boostable) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	if (p->ainsn.emulate_op) {
		p->ainsn.emulate_op(p, regs);
		kprobe_post_process(p, regs, kcb);
		return;
	}

	/* Disable interrupts and set the ip register to the single-step buffer */
	regs->flags &= ~X86_EFLAGS_IF;
	regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.  We don't use a trap either; instead, another
 * "int3" is placed right after the copied instruction.
 * Unlike trap-based single-stepping, "int3" single-stepping cannot
 * handle instructions that change the ip register, e.g. jmp,
 * call, and conditional jmp, nor instructions that change the IF
 * flag, because interrupts must be disabled around the single-step.
 * Such instructions are emulated in software; the others are
 * single-stepped out of line on the copied-instruction buffer.
 *
 * When the second "int3" is handled, regs->ip and regs->flags need to
 * be adjusted so that we can resume execution at the correct code.
 */
static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;

	/* Restore the saved interrupt flag and the ip register */
	regs->flags |= kcb->kprobe_saved_flags;
	/* Note that regs->ip points past the executed int3, so step back */
	regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
}
NOKPROBE_SYMBOL(resume_singlestep);
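
/*
 * Worked example (illustrative): if a 3-byte instruction probed at p->addr
 * was copied to p->ainsn.insn and its trailing int3 fired, regs->ip equals
 * copy_ip + 3 + INT3_INSN_SIZE; the fixup above rewrites it to orig_ip + 3,
 * i.e. just past the original instruction.
 */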

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler.  We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction.  This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG, or we'll continue in an endless reentering loop
		 * and eventually end up with a stack overflow.
		 */
		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
{
	return (kcb->kprobe_status == KPROBE_HIT_SS ||
		kcb->kprobe_status == KPROBE_REENTER);
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire duration of kprobe
	 * processing.  Since int3 and the debug trap disable irqs, and we
	 * clear IF while single-stepping, this context is not preemptible.
	 */

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, that means
			 * the user handler set up the registers to resume at
			 * another instruction, so we must skip single-stepping.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return 1;
	} else if (kprobe_is_ss(kcb)) {
		p = kprobe_running();
		if ((unsigned long)p->ainsn.insn < regs->ip &&
		    (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
			/* Most probably this is the second int3 for the single-step */
			resume_singlestep(p, regs, kcb);
			kprobe_post_process(p, regs, kcb);
			return 1;
		}
	}

	if (*addr != INT3_INSN_OPCODE) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	} /* else: not a kprobe fault; let the kernel handle it */

	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

/*
 * When a retprobed function returns, this code saves the registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
asm(
	".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	/* We don't bother saving the ss register */
#ifdef CONFIG_X86_64
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rdi\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movq %rax, 19*8(%rsp)\n"
	RESTORE_REGS_STRING
	"	popfq\n"
#else
	"	pushl %esp\n"
	"	pushfl\n"
	SAVE_REGS_STRING
	"	movl %esp, %eax\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movl %eax, 15*4(%esp)\n"
	RESTORE_REGS_STRING
	"	popfl\n"
#endif
	"	ret\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);

/*
 * Called from kretprobe_trampoline
 */
__used __visible void *trampoline_handler(struct pt_regs *regs)
{
	/* fixup registers */
	regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
	regs->gs = 0;
#endif
	regs->ip = (unsigned long)&kretprobe_trampoline;
	regs->orig_ax = ~0UL;

	return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, &regs->sp);
}
NOKPROBE_SYMBOL(trampoline_handler);
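
/*
 * Consumer-side sketch (illustrative, compiled out; names are hypothetical):
 * a kretprobe whose return handler runs from the trampoline above via
 * kretprobe_trampoline_handler().
 */
#if 0
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	pr_info("return value: %lx\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe example_rp = {
	.kp.symbol_name	= "kernel_clone",	/* hypothetical target */
	.handler	= example_ret_handler,
	.maxactive	= 20,	/* instances tracked concurrently */
};

/* register_kretprobe(&example_rp); ... unregister_kretprobe(&example_rp); */
#endif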

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault.  We reset the current
		 * kprobe so that the ip points back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;

		/*
		 * If the IF flag was set before the kprobe hit,
		 * restore it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused a page fault.  This could happen if the
		 * handler tries to access user space, e.g. via
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}