1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "SMP alternatives: " fmt
4 #include <linux/module.h>
5 #include <linux/sched.h>
6 #include <linux/perf_event.h>
7 #include <linux/mutex.h>
8 #include <linux/list.h>
9 #include <linux/stringify.h>
11 #include <linux/vmalloc.h>
12 #include <linux/memory.h>
13 #include <linux/stop_machine.h>
14 #include <linux/slab.h>
15 #include <linux/kdebug.h>
16 #include <linux/kprobes.h>
17 #include <linux/mmu_context.h>
18 #include <linux/bsearch.h>
19 #include <linux/sync_core.h>
20 #include <asm/text-patching.h>
21 #include <asm/alternative.h>
22 #include <asm/sections.h>
25 #include <asm/cacheflush.h>
26 #include <asm/tlbflush.h>
29 #include <asm/fixmap.h>
31 int __read_mostly alternatives_patched;
33 EXPORT_SYMBOL_GPL(alternatives_patched);
35 #define MAX_PATCH_LEN (255-1)
37 static int __initdata_or_module debug_alternative;
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
46 static int noreplace_smp;
static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
		if (!(len))						\
			break;						\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
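/*
 * Usage note (illustrative, not part of the original file): the helpers above
 * are compiled in but stay quiet unless "debug-alternative" is passed on the
 * kernel command line. Typical calls later in this file look like:
 *
 *	DPRINTK("alt table %px, -> %px", start, end);
 *	DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
 *
 * Both print at KERN_DEBUG with the "SMP alternatives: " pr_fmt() prefix.
 */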
/*
 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
 * making up that NOP. To get from one NOP to the next, add an offset
 * equal to the sum of the sizes of all NOPs preceding the one we want.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous NOPs.
 */
85 #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
86 static const unsigned char intelnops[] =
98 static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
104 intelnops + 1 + 2 + 3,
105 intelnops + 1 + 2 + 3 + 4,
106 intelnops + 1 + 2 + 3 + 4 + 5,
107 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
108 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
109 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
114 static const unsigned char k8nops[] =
126 static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
133 k8nops + 1 + 2 + 3 + 4,
134 k8nops + 1 + 2 + 3 + 4 + 5,
135 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
136 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
137 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
141 #if defined(K7_NOP1) && !defined(CONFIG_X86_64)
142 static const unsigned char k7nops[] =
154 static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
161 k7nops + 1 + 2 + 3 + 4,
162 k7nops + 1 + 2 + 3 + 4 + 5,
163 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
164 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
165 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
170 static const unsigned char p6nops[] =
182 static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
189 p6nops + 1 + 2 + 3 + 4,
190 p6nops + 1 + 2 + 3 + 4 + 5,
191 p6nops + 1 + 2 + 3 + 4 + 5 + 6,
192 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
193 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
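/*
 * Illustrative note (restating the layout described above, not new code):
 * each pointer table is built so that xxx_nops[n] points at an n-byte NOP
 * inside the flat byte array, e.g.
 *
 *	p6_nops[3] == p6nops + 1 + 2;		// skip the 1- and 2-byte NOPs
 *	memcpy(buf, ideal_nops[3], 3);		// emit one 3-byte NOP
 */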
204 void __init arch_init_ideal_nops(void)
206 switch (boot_cpu_data.x86_vendor) {
207 case X86_VENDOR_INTEL:
209 * Due to a decoder implementation quirk, some
210 * specific Intel CPUs actually perform better with
211 * the "k8_nops" than with the SDM-recommended NOPs.
213 if (boot_cpu_data.x86 == 6 &&
214 boot_cpu_data.x86_model >= 0x0f &&
215 boot_cpu_data.x86_model != 0x1c &&
216 boot_cpu_data.x86_model != 0x26 &&
217 boot_cpu_data.x86_model != 0x27 &&
218 boot_cpu_data.x86_model < 0x30) {
219 ideal_nops = k8_nops;
220 } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
221 ideal_nops = p6_nops;
224 ideal_nops = k8_nops;
226 ideal_nops = intel_nops;
231 case X86_VENDOR_HYGON:
232 ideal_nops = p6_nops;
236 if (boot_cpu_data.x86 > 0xf) {
237 ideal_nops = p6_nops;
245 ideal_nops = k8_nops;
247 if (boot_cpu_has(X86_FEATURE_K8))
248 ideal_nops = k8_nops;
249 else if (boot_cpu_has(X86_FEATURE_K7))
250 ideal_nops = k7_nops;
252 ideal_nops = intel_nops;
257 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
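/*
 * Worked example (hypothetical buffer, not from the original file): with
 * ASM_NOP_MAX == 8, padding an 11-byte hole takes two chunks:
 *
 *	add_nops(buf, 11);	// copies ideal_nops[8], then ideal_nops[3]
 */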
270 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
271 extern s32 __smp_locks[], __smp_locks_end[];
272 void text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
282 static void __init_or_module
283 recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
285 u8 *next_rip, *tgt_rip;
289 if (a->replacementlen != 5)
292 o_dspl = *(s32 *)(insn_buff + 1);
294 /* next_rip of the replacement JMP */
295 next_rip = repl_insn + a->replacementlen;
296 /* target rip of the replacement JMP */
297 tgt_rip = next_rip + o_dspl;
298 n_dspl = tgt_rip - orig_insn;
300 DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
302 if (tgt_rip - orig_insn >= 0) {
303 if (n_dspl - 2 <= 127)
307 /* negative offset */
309 if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
319 insn_buff[1] = (s8)n_dspl;
320 add_nops(insn_buff + 2, 3);
329 *(s32 *)&insn_buff[1] = n_dspl;
	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
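/*
 * Worked example for recompute_jump() (hypothetical addresses, not from the
 * original file): assume the replacement is a JMP with o_dspl == 0x20 and the
 * replacement buffer lives 0x100 bytes after the original site. Then:
 *
 *	next_rip = repl_insn + 5;		// end of the replacement JMP
 *	tgt_rip  = next_rip + 0x20;
 *	n_dspl   = tgt_rip - orig_insn;		// 0x125 from the original site
 *
 * 0x125 - 2 does not fit in a signed byte, so the JMP has to be emitted in
 * its 5-byte 0xe9 rel32 form rather than the 2-byte 0xeb rel8 form.
 */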
/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
371 void __init_or_module noinline apply_alternatives(struct alt_instr *start,
372 struct alt_instr *end)
375 u8 *instr, *replacement;
376 u8 insn_buff[MAX_PATCH_LEN];
378 DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
388 for (a = start; a < end; a++) {
389 int insn_buff_sz = 0;
391 instr = (u8 *)&a->instr_offset + a->instr_offset;
392 replacement = (u8 *)&a->repl_offset + a->repl_offset;
393 BUG_ON(a->instrlen > sizeof(insn_buff));
394 BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}
402 DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
405 instr, instr, a->instrlen,
406 replacement, a->replacementlen, a->padlen);
408 DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
409 DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
411 memcpy(insn_buff, replacement, a->replacementlen);
412 insn_buff_sz = a->replacementlen;
		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
420 if (a->replacementlen == 5 && *insn_buff == 0xe8) {
421 *(s32 *)(insn_buff + 1) += replacement - instr;
422 DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
423 *(s32 *)(insn_buff + 1),
424 (unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
427 if (a->replacementlen && is_jmp(replacement[0]))
428 recompute_jump(a, instr, replacement, insn_buff);
430 if (a->instrlen > a->replacementlen) {
431 add_nops(insn_buff + a->replacementlen,
432 a->instrlen - a->replacementlen);
433 insn_buff_sz += a->instrlen - a->replacementlen;
435 DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
437 text_poke_early(instr, insn_buff, insn_buff_sz);
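/*
 * Background sketch (assumption: the standard alternative() interface from
 * <asm/alternative.h>, not code in this file): a patch site is typically
 * emitted as e.g.
 *
 *	alternative("", "stac", X86_FEATURE_SMAP);
 *
 * which records the original instruction bytes, the replacement and the
 * feature bit in a struct alt_instr entry. apply_alternatives() above copies
 * the replacement over the original (NOP-padding any length difference) only
 * when boot_cpu_has() reports the feature.
 */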
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}
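/*
 * Background sketch (assumption: the standard LOCK_PREFIX machinery from
 * <asm/alternative.h>, not code in this file): SMP-safe atomics are emitted as
 *
 *	asm volatile (LOCK_PREFIX "addl %1,%0" : "+m" (v->counter) : "ir" (i));
 *
 * where LOCK_PREFIX expands to a 0xf0 (lock) byte plus a .smp_locks entry
 * recording its address. The two helpers above rewrite that byte to 0x3e (a
 * harmless DS override) while only one CPU can run, and back to 0xf0 when the
 * kernel switches to SMP code.
 */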
struct smp_alt_module {
	/* owning module (NULL for the core kernel) */
	struct module	*mod;
	char		*name;
	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;
	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;
	struct list_head next;
};
489 static LIST_HEAD(smp_alt_modules);
490 static bool uniproc_patched = false; /* protected by text_mutex */
492 void __init_or_module alternatives_smp_module_add(struct module *mod,
494 void *locks, void *locks_end,
495 void *text, void *text_end)
497 struct smp_alt_module *smp;
499 mutex_lock(&text_mutex);
500 if (!uniproc_patched)
503 if (num_possible_cpus() == 1)
504 /* Don't bother remembering, we'll never have to undo it. */
507 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
509 /* we'll run the (safe but slow) SMP code then ... */
515 smp->locks_end = locks_end;
517 smp->text_end = text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
519 smp->locks, smp->locks_end,
520 smp->text, smp->text_end, smp->name);
522 list_add_tail(&smp->next, &smp_alt_modules);
524 alternatives_smp_unlock(locks, locks_end, text, text_end);
526 mutex_unlock(&text_mutex);
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}
544 void alternatives_enable_smp(void)
546 struct smp_alt_module *mod;
548 /* Why bother if there are no other CPUs? */
549 BUG_ON(num_possible_cpus() == 1);
551 mutex_lock(&text_mutex);
553 if (uniproc_patched) {
554 pr_info("switching to SMP code\n");
555 BUG_ON(num_online_cpus() != 1);
556 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
557 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
558 list_for_each_entry(mod, &smp_alt_modules, next)
559 alternatives_smp_lock(mod->locks, mod->locks_end,
560 mod->text, mod->text_end);
561 uniproc_patched = false;
563 mutex_unlock(&text_mutex);
/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;
			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}
	return 0;
}
592 #endif /* CONFIG_SMP */
594 #ifdef CONFIG_PARAVIRT
595 void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
596 struct paravirt_patch_site *end)
598 struct paravirt_patch_site *p;
599 char insn_buff[MAX_PATCH_LEN];
601 for (p = start; p < end; p++) {
604 BUG_ON(p->len > MAX_PATCH_LEN);
605 /* prep the buffer with the original instructions */
606 memcpy(insn_buff, p->instr, p->len);
607 used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
609 BUG_ON(used > p->len);
611 /* Pad the rest with nops */
612 add_nops(insn_buff + used, p->len - used);
613 text_poke_early(p->instr, insn_buff, p->len);
616 extern struct paravirt_patch_site __start_parainstructions[],
617 __stop_parainstructions[];
618 #endif /* CONFIG_PARAVIRT */
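/*
 * Background sketch (assumption: the generic pv_ops patching flow, not code
 * in this file): a paravirt call such as arch_local_irq_disable() is emitted
 * as an indirect call through pv_ops plus a .parainstructions entry recording
 * the site, its type and length. apply_paravirt() above hands each site to
 * pv_ops.init.patch(), which may rewrite it with a native instruction (e.g.
 * "cli"), and add_nops() pads whatever is left of the original length.
 */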
621 * Self-test for the INT3 based CALL emulation code.
623 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
624 * properly and that there is a stack gap between the INT3 frame and the
625 * previous context. Without this gap doing a virtual PUSH on the interrupted
626 * stack would corrupt the INT3 IRET frame.
628 * See entry_{32,64}.S for more details.
632 * We define the int3_magic() function in assembly to control the calling
633 * convention such that we can 'call' it from assembly.
636 extern void int3_magic(unsigned int *ptr); /* defined in asm */
639 " .pushsection .init.text, \"ax\", @progbits\n"
640 " .type int3_magic, @function\n"
642 " movl $1, (%" _ASM_ARG1 ")\n"
644 " .size int3_magic, .-int3_magic\n"
648 extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;
	if (val != DIE_INT3)
		return NOTIFY_DONE;
	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}
static void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;
677 BUG_ON(register_die_notifier(&int3_exception_nb));
680 * Basically: int3_magic(&val); but really complicated :-)
682 * Stick the address of the INT3 instruction into int3_selftest_ip,
683 * then trigger the INT3, padded with NOPs to match a CALL instruction
	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
		      ".pushsection .init.data,\"aw\"\n\t"
		      ".align " __ASM_SEL(4, 8) "\n\t"
		      ".type int3_selftest_ip, @object\n\t"
		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
		      "int3_selftest_ip:\n\t"
		      __ASM_SEL(.long, .quad) " 1b\n\t"
		      ".popsection\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");
700 unregister_die_notifier(&int3_exception_nb);
703 void __init alternative_instructions(void)
	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code that is about to be
	 * patched. Other CPUs are not running.
	 */
	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */
725 apply_alternatives(__alt_instructions, __alt_instructions_end);
728 /* Patch to UP if other cpus not imminent. */
729 if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
730 uniproc_patched = true;
731 alternatives_smp_module_add(NULL, "core kernel",
732 __smp_locks, __smp_locks_end,
736 if (!uniproc_patched || num_possible_cpus() == 1) {
737 free_init_pages("SMP alternatives",
738 (unsigned long)__smp_locks,
739 (unsigned long)__smp_locks_end);
743 apply_paravirt(__parainstructions, __parainstructions_end);
746 alternatives_patched = 1;
750 * text_poke_early - Update instructions on a live kernel at boot time
751 * @addr: address to modify
752 * @opcode: source of the copy
753 * @len: length to copy
755 * When you use this code to patch more than one byte of an instruction
756 * you need to make sure that other CPUs cannot execute this code in parallel.
757 * Also no thread must be currently preempted in the middle of these
758 * instructions. And on the local CPU you need to be protected against NMI or
759 * MCE handlers seeing an inconsistent instruction while you patch.
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();
		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}
typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;
/*
 * Using a temporary mm allows the kernel to set temporary mappings that are
 * not accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also allows the
 * kernel to avoid TLB shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
804 static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
806 temp_mm_state_t temp_state;
808 lockdep_assert_irqs_disabled();
809 temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
810 switch_mm_irqs_off(NULL, mm, current);
	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
823 if (hw_breakpoint_active())
824 hw_breakpoint_disable();
829 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
831 lockdep_assert_irqs_disabled();
832 switch_mm_irqs_off(NULL, prev_state.mm, current);
	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}
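/*
 * Typical pairing sketch (illustrative only; __text_poke() below is the real
 * and only intended user):
 *
 *	temp_mm_state_t prev;
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	prev = use_temporary_mm(poking_mm);
 *	... write through the temporary mapping at poking_addr ...
 *	unuse_temporary_mm(prev);
 *	local_irq_restore(flags);
 */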
842 __ro_after_init struct mm_struct *poking_mm;
843 __ro_after_init unsigned long poking_addr;
845 static void *__text_poke(void *addr, const void *opcode, size_t len)
847 bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
848 struct page *pages[2] = {NULL};
849 temp_mm_state_t prev;
856 * While boot memory allocator is running we cannot use struct pages as
857 * they are not yet initialized. There is no way to recover.
859 BUG_ON(!after_bootmem);
861 if (!core_kernel_text((unsigned long)addr)) {
862 pages[0] = vmalloc_to_page(addr);
863 if (cross_page_boundary)
864 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
866 pages[0] = virt_to_page(addr);
867 WARN_ON(!PageReserved(pages[0]));
868 if (cross_page_boundary)
869 pages[1] = virt_to_page(addr + PAGE_SIZE);
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
875 BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
877 local_irq_save(flags);
880 * Map the page without the global bit, as TLB flushing is done with
881 * flush_tlb_mm_range(), which is intended for non-global PTEs.
883 pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
	/*
	 * The lock is not really needed, but this allows avoiding open-coding.
	 */
888 ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
891 * This must not fail; preallocated in poking_init().
895 pte = mk_pte(pages[0], pgprot);
896 set_pte_at(poking_mm, poking_addr, ptep, pte);
898 if (cross_page_boundary) {
899 pte = mk_pte(pages[1], pgprot);
900 set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
904 * Loading the temporary mm behaves as a compiler barrier, which
905 * guarantees that the PTE will be set at the time memcpy() is done.
907 prev = use_temporary_mm(poking_mm);
909 kasan_disable_current();
910 memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
911 kasan_enable_current();
	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();
919 pte_clear(poking_mm, poking_addr, ptep);
920 if (cross_page_boundary)
921 pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
924 * Loading the previous page-table hierarchy requires a serializing
925 * instruction that already allows the core to see the updated version.
926 * Xen-PV is assumed to serialize execution in a similar manner.
928 unuse_temporary_mm(prev);
	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not if the mm is not in use, as is the case at this point.
	 */
934 flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
935 (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
939 * If the text does not match what we just wrote then something is
940 * fundamentally screwy; there's nothing we can really do about that.
942 BUG_ON(memcmp(addr, opcode, len));
	pte_unmap_unlock(ptep, ptl);
	local_irq_restore(flags);
	return addr;
}
950 * text_poke - Update instructions on a live kernel
951 * @addr: address to modify
952 * @opcode: source of the copy
953 * @len: length to copy
955 * Only atomic text poke/set should be allowed when not doing early patching.
956 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * appropriately.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}
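/*
 * Usage sketch (hypothetical call site, not from this file):
 *
 *	u8 brk = INT3_INSN_OPCODE;
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(site, &brk, INT3_INSN_SIZE);	// plant a breakpoint byte
 *	text_poke_sync();			// serialize all CPUs
 *	mutex_unlock(&text_mutex);
 */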
973 * text_poke_kgdb - Update instructions on a live kernel by kgdb
974 * @addr: address to modify
975 * @opcode: source of the copy
976 * @len: length to copy
978 * Only atomic text poke/set should be allowed when not doing early patching.
979 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}
static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}
struct text_poke_loc {
	s32 rel_addr; /* addr := _stext + rel_addr */
	s32 rel32;
	u8 opcode;
	const u8 text[POKE_MAX_OPCODE_SIZE];
	u8 old;
};
struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
	atomic_t refs;
};

static struct bp_patching_desc *bp_desc;
static __always_inline
struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */

	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
		return NULL;
	return desc;
}

static __always_inline void put_desc(struct bp_patching_desc *desc)
{
	smp_mb__before_atomic();
	arch_atomic_dec(&desc->refs);
}
static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
	return _stext + tp->rel_addr;
}

static __always_inline int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < text_poke_addr(tp))
		return -1;
	if (key > text_poke_addr(tp))
		return 1;
	return 0;
}
1050 int noinstr poke_int3_handler(struct pt_regs *regs)
1052 struct bp_patching_desc *desc;
1053 struct text_poke_loc *tp;
	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc:
	 *
	 *	bp_desc = desc			INT3
	 *	WMB				RMB
	 *	write INT3			if (desc)
	 */
	smp_rmb();

	desc = try_get_desc(&bp_desc);
	if (!desc)
		return 0;
1075 * Discount the INT3. See text_poke_bp_batch().
1077 ip = (void *) regs->ip - INT3_INSN_SIZE;
1080 * Skip the binary search if there is a single member in the vector.
1082 if (unlikely(desc->nr_entries > 1)) {
1083 tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
1084 sizeof(struct text_poke_loc),
1090 if (text_poke_addr(tp) != ip)
1094 len = text_opcode_size(tp->opcode);
1097 switch (tp->opcode) {
1098 case INT3_INSN_OPCODE:
1100 * Someone poked an explicit INT3, they'll want to handle it,
1105 case CALL_INSN_OPCODE:
1106 int3_emulate_call(regs, (long)ip + tp->rel32);
1109 case JMP32_INSN_OPCODE:
1110 case JMP8_INSN_OPCODE:
1111 int3_emulate_jmp(regs, (long)ip + tp->rel32);
1125 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
1126 static struct text_poke_loc tp_vec[TP_VEC_MAX];
1127 static int tp_vec_nr;
1130 * text_poke_bp_batch() -- update instructions on live kernel on SMP
1131 * @tp: vector of instructions to patch
1132 * @nr_entries: number of entries in the vector
1134 * Modify multi-byte instruction by using int3 breakpoint on SMP.
1135 * We completely avoid stop_machine() here, and achieve the
1136 * synchronization using int3 breakpoint.
 * The way it is done:
 *	- For each entry in the vector:
 *		- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of the
 *		  replacing opcode
 *	- sync cores
 */
1150 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1152 struct bp_patching_desc desc = {
1154 .nr_entries = nr_entries,
1155 .refs = ATOMIC_INIT(1),
1157 unsigned char int3 = INT3_INSN_OPCODE;
1161 lockdep_assert_held(&text_mutex);
1163 smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
1166 * Corresponding read barrier in int3 notifier for making sure the
1167 * nr_entries and handler are correctly ordered wrt. patching.
	/*
	 * First step: add an int3 trap to the address that will be patched.
	 */
1174 for (i = 0; i < nr_entries; i++) {
1175 tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
1176 text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
1182 * Second step: update all but the first byte of the patched range.
1184 for (do_sync = 0, i = 0; i < nr_entries; i++) {
1185 u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
1186 int len = text_opcode_size(tp[i].opcode);
1188 if (len - INT3_INSN_SIZE > 0) {
1189 memcpy(old + INT3_INSN_SIZE,
1190 text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1191 len - INT3_INSN_SIZE);
1192 text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1193 (const char *)tp[i].text + INT3_INSN_SIZE,
1194 len - INT3_INSN_SIZE);
		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding which must walk the executable code
		 * to reconstruct the trace. The flow up to here is:
		 *   - write INT3 byte
		 *   - IPI-SYNC
		 *   - write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *   - emit RECORD_TEXT_POKE with the new instruction
		 *   - IPI-SYNC
		 *   - write first byte
		 *   - IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * instruction trace.
		 *
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
				     tp[i].text, len);
1228 * According to Intel, this core syncing is very likely
1229 * not necessary and we'd be safe even without it. But
1230 * better safe than sorry (plus there's not only Intel).
	/*
	 * Third step: replace the first byte (int3) by the first byte of the
	 * replacing opcode.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		if (tp[i].text[0] == INT3_INSN_OPCODE)
			continue;

		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
		do_sync++;
	}
	/*
	 * Remove and synchronize_rcu(), except we have a very primitive
	 * refcount based completion.
	 */
	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
	if (!atomic_dec_and_test(&desc.refs))
		atomic_cond_read_acquire(&desc.refs, !VAL);
}
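/*
 * Byte-level sketch of one 5-byte site going through the three steps above
 * (illustrative values; e8 = CALL, cc = INT3):
 *
 *	original:	0f 1f 44 00 00		(5-byte NOP)
 *	step 1:		cc 1f 44 00 00		(INT3 armed, handler emulates)
 *	step 2:		cc xx xx xx xx		(new tail written)
 *	step 3:		e8 xx xx xx xx		(first byte completes the CALL)
 *
 * Between the steps, any CPU that executes the site traps into
 * poke_int3_handler() and has the new instruction emulated instead of ever
 * seeing a torn mix of old and new bytes.
 */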
1259 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
{
	struct insn insn;

	memcpy((void *)tp->text, opcode, len);
1268 kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
1269 insn_get_length(&insn);
1271 BUG_ON(!insn_complete(&insn));
1272 BUG_ON(len != insn.length);
1274 tp->rel_addr = addr - (void *)_stext;
1275 tp->opcode = insn.opcode.bytes[0];
	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		break;

	case CALL_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		tp->rel32 = insn.immediate.value;
		break;

	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
			BUG_ON(memcmp(emulate, ideal_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->rel32 = 0;
			break;
		case 5: /* NOP5 -- emulate as JMP32+0 */
			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->rel32 = 0;
			break;
		default: /* unknown instruction */
			BUG();
		}
		break;
	}
}
/*
 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
{
	struct text_poke_loc *tp;

	if (!tp_vec_nr)
		return false;
	if (!addr) /* force */
		return true;

	tp = &tp_vec[tp_vec_nr - 1];
	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
		return true;

	return false;
}
static void text_poke_flush(void *addr)
{
	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
		text_poke_bp_batch(tp_vec, tp_vec_nr);
		tp_vec_nr = 0;
	}
}

void text_poke_finish(void)
{
	text_poke_flush(NULL);
}
void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc *tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_flush(addr);

	tp = &tp_vec[tp_vec_nr++];
	text_poke_loc_init(tp, addr, opcode, len, emulate);
}
1358 * text_poke_bp() -- update instructions on live kernel on SMP
1359 * @addr: address to patch
1360 * @opcode: opcode of new instruction
1361 * @len: length to copy
 * @emulate: instruction to be emulated
1364 * Update a single instruction with the vector in the stack, avoiding
1365 * dynamically allocated memory. This function should be used when it is
1366 * not possible to allocate memory.
void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}
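/*
 * Usage sketch (hypothetical call site; real users are e.g. the jump-label
 * and static-call code):
 *
 *	u8 insn[5] = { CALL_INSN_OPCODE, };
 *
 *	*(s32 *)&insn[1] = (long)target - ((long)site + CALL_INSN_SIZE);
 *	mutex_lock(&text_mutex);
 *	text_poke_bp(site, insn, CALL_INSN_SIZE, insn);
 *	mutex_unlock(&text_mutex);
 *
 * The 'emulate' argument tells poke_int3_handler() which instruction to
 * emulate for a CPU that hits the transient INT3 while the site is being
 * patched; passing the new instruction itself is the common case.
 */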