// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
#include <asm/insn.h>

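/*
 * Illustrative sketch only (not code from this file; my_key and slow_path()
 * are made-up names): a static key user such as
 *
 *	DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *	if (static_branch_unlikely(&my_key))
 *		slow_path();
 *
 * is flipped at run time with static_branch_enable(&my_key). The generic
 * jump_label core then invokes the arch_jump_label_transform*() hooks in this
 * file to rewrite the NOP/JMP instruction at every branch site.
 */

/*
 * With variable-size jump labels, a branch site holds either a 2-byte or a
 * 5-byte NOP/JMP. Decode the instruction currently at the site to find out
 * which one it is.
 */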
int arch_jump_entry_size(struct jump_entry *entry)
{
	struct insn insn = {};

	insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
	BUG_ON(insn.length != 2 && insn.length != 5);

	return insn.length;
}

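/* The bytes to write at a jump entry site and their length (2 or 5). */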
struct jump_label_patch {
	const void *code;
	int size;
};

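/*
 * Compute both the instruction we expect to find at the site right now
 * (NOP when enabling, JMP when disabling) and the instruction we want to
 * install. If the live kernel text does not match the expectation, the
 * site has been corrupted and we BUG().
 */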
static struct jump_label_patch
__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
	const void *expect, *code, *nop;
	const void *addr, *dest;
	int size;

	addr = (void *)jump_entry_code(entry);
	dest = (void *)jump_entry_target(entry);

	size = arch_jump_entry_size(entry);
	switch (size) {
	case JMP8_INSN_SIZE:
		code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
		nop = x86_nops[size];
		break;

	case JMP32_INSN_SIZE:
		code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
		nop = x86_nops[size];
		break;

	default: BUG();
	}

	if (type == JUMP_LABEL_JMP)
		expect = nop;
	else
		expect = code;

	if (memcmp(addr, expect, size)) {
		/*
		 * The location is not an op that we were expecting.
		 * Something went wrong. Crash the box, as something could be
		 * corrupting the kernel.
		 */
		pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph) size:%d type:%d\n",
				addr, addr, addr, expect, size, type);
		BUG();
	}

	if (type == JUMP_LABEL_NOP)
		code = nop;

	return (struct jump_label_patch){.code = code, .size = size};
}

static __always_inline void
__jump_label_transform(struct jump_entry *entry,
		       enum jump_label_type type,
		       int init)
{
	const struct jump_label_patch jlp = __jump_label_patch(entry, type);

	/*
	 * As long as only a single processor is running and the code is still
	 * not marked read-only, text_poke_early() can be used; checking that
	 * system_state is SYSTEM_BOOTING guarantees it. system_state is set to
	 * SYSTEM_SCHEDULING before other cores are woken and before the
	 * code is write-protected.
	 *
	 * While the change is being made, ignore whether this is a
	 * nop -> jump or a jump -> nop transition, and always treat the
	 * nop as the 'currently valid' instruction.
	 */
	if (init || system_state == SYSTEM_BOOTING) {
		text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
		return;
	}

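	/*
	 * Otherwise go through text_poke_bp(), which uses an INT3 breakpoint
	 * and core serialization so that CPUs executing through the site
	 * never observe a partially written instruction.
	 */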
	text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}

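/* All kernel text patching is serialized by text_mutex. */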
static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}

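/*
 * Batch mode: queue the patch in the text_poke() batching buffer instead of
 * applying it immediately. A later arch_jump_label_transform_apply() flushes
 * the whole batch at once, amortizing the expensive CPU synchronization over
 * all queued sites.
 */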
bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	struct jump_label_patch jlp;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * Fall back to non-batching mode.
		 */
		arch_jump_label_transform(entry, type);
		return true;
	}

	mutex_lock(&text_mutex);
	jlp = __jump_label_patch(entry, type);
	text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
	mutex_unlock(&text_mutex);
	return true;
}

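/* Flush any patches queued by arch_jump_label_transform_queue(). */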
void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}

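/*
 * State of the initial (boot / module load) transform. Nothing in this file
 * ever moves jlstate past JL_STATE_START, so arch_jump_label_transform_static()
 * below currently leaves the toolchain-emitted NOPs untouched.
 */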
static enum {
	JL_STATE_START,
	JL_STATE_NO_UPDATE,
	JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;

__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
				      enum jump_label_type type)
{
	if (jlstate == JL_STATE_UPDATE)
		jump_label_transform(entry, type, 1);
}