arch/powerpc/kernel/optprobes.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>

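/*
 * Word offsets of the labelled points in the optprobe template
 * (defined in optprobes_head.S), relative to the template entry.
 */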
#define TMPL_CALL_HDLR_IDX      (optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX        (optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX            (optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX             (optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX           (optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX            (optprobe_template_end - optprobe_template_entry)

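/*
 * A single buffer is reserved for optprobe detour code, so track
 * whether it has already been handed out.
 */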
static bool insn_page_in_use;

void *alloc_optinsn_page(void)
{
        if (insn_page_in_use)
                return NULL;
        insn_page_in_use = true;
        return &optinsn_slot;
}

void free_optinsn_page(void *page)
{
        insn_page_in_use = false;
}

/*
 * Check if we can optimize this probe. Returns the post-emulation
 * NIP if the probe can be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
        struct pt_regs regs;
        struct instruction_op op;
        unsigned long nip = 0;
        unsigned long addr = (unsigned long)p->addr;

        /*
         * The kprobe placed at the kretprobe trampoline during boot
         * is a 'nop' instruction, which can always be emulated, so
         * further checks can be skipped.
         */
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return addr + sizeof(kprobe_opcode_t);

        /*
         * We only support optimizing kernel addresses, not module
         * addresses.
         *
         * FIXME: Optimize kprobes placed in module addresses.
         */
        if (!is_kernel_addr(addr))
                return 0;

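        /*
         * Set up dummy pt_regs so that analyse_instr() can compute
         * the post-emulation nip of the probed instruction.
         */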
        memset(&regs, 0, sizeof(struct pt_regs));
        regs.nip = addr;
        regs.trap = 0x0;
        regs.msr = MSR_KERNEL;

        /*
         * Kprobes placed on conditional branch instructions are not
         * optimized, as we cannot predict the nip beforehand with
         * dummy pt_regs, and so cannot ensure that the return branch
         * from the detour buffer falls within the branch range
         * (i.e. +/- 32MB). A branch back from the trampoline to the
         * nip returned by analyse_instr() here is set up in the
         * detour buffer.
         *
         * Ensure that the instruction is not a conditional branch,
         * and that it can be emulated.
         */
        if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
            analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
                emulate_update_regs(&regs, &op);
                nip = regs.nip;
        }

        return nip;
}

static void optimized_callback(struct optimized_kprobe *op,
                               struct pt_regs *regs)
{
        /* This is possible if 'op' is pending delayed unoptimization */
        if (kprobe_disabled(&op->kp))
                return;

        preempt_disable();

        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                __this_cpu_write(current_kprobe, &op->kp);
                regs_set_return_ip(regs, (unsigned long)op->kp.addr);
                get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }

        preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
        if (op->optinsn.insn) {
                free_optinsn_slot(op->optinsn.insn, 1);
                op->optinsn.insn = NULL;
        }
}

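/*
 * Generate instructions to load the provided 32-bit immediate value
 * to register 'reg' and patch these instructions at 'addr'.
 */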
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
        patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
        patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

/*
 * Generate instructions to load the provided 64-bit immediate value
 * to register 'reg' and patch these instructions at 'addr'.
 */
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
        patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
        patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
        patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
        patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
        patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

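/*
 * Load an immediate of the native register width: 64-bit on PPC64,
 * 32-bit otherwise.
 */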
static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
        if (IS_ENABLED(CONFIG_PPC64))
                patch_imm64_load_insns(val, reg, addr);
        else
                patch_imm32_load_insns(val, reg, addr);
}

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
        struct ppc_inst branch_op_callback, branch_emulate_step, temp;
        unsigned long op_callback_addr, emulate_step_addr;
        kprobe_opcode_t *buff;
        long b_offset;
        unsigned long nip, size;
        int rc, i;

        nip = can_optimize(p);
        if (!nip)
                return -EILSEQ;

        /* Allocate instruction slot for detour buffer */
        buff = get_optinsn_slot();
        if (!buff)
                return -ENOMEM;

        /*
         * OPTPROBE uses 'b' instruction to branch to optinsn.insn.
         *
         * The target address has to be relatively close, to permit use
         * of a branch instruction on powerpc, because the address is
         * specified in an immediate field in the instruction opcode
         * itself, i.e. 24 bits in the opcode specify the address.
         * Therefore the address should be within 32MB on either side
         * of the current instruction.
         */
        b_offset = (unsigned long)buff - (unsigned long)p->addr;
        if (!is_offset_in_branch_range(b_offset))
                goto error;

        /* Check if the return address is also within 32MB range */
        b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
        if (!is_offset_in_branch_range(b_offset))
                goto error;

        /* Setup template */
        /* We can optimize this via patch_instruction_window later */
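        /* TMPL_END_IDX counts kprobe_opcode_t (u32) slots, so this is the template size in 32-bit words */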
        size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
        pr_devel("Copying template to %p, size %lu\n", buff, size);
        for (i = 0; i < size; i++) {
                rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
                if (rc < 0)
                        goto error;
        }

        /*
         * Fixup the template with instructions to:
         * 1. load the address of the actual probepoint
         */
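        /* 'op' is passed to optimized_callback() in r3, the first argument register */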
        patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

        /*
         * 2. branch to optimized_callback() and emulate_step()
         */
        op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
        emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
        if (!op_callback_addr || !emulate_step_addr) {
                WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
                goto error;
        }

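        /*
         * create_branch() with BRANCH_SET_LINK generates a 'bl'
         * (branch-and-link) instruction for each call.
         */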
        rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
                           op_callback_addr, BRANCH_SET_LINK);

        rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
                            emulate_step_addr, BRANCH_SET_LINK);

        if (rc)
                goto error;

        patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
        patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

        /*
         * 3. load the instruction to be emulated into the relevant register, and
         */
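        /*
         * On 64-bit, the instruction image itself is loaded as an
         * immediate; on 32-bit, the address of the copied instruction
         * is loaded instead.
         */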
        if (IS_ENABLED(CONFIG_PPC64)) {
                temp = ppc_inst_read(p->ainsn.insn);
                patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
        } else {
                patch_imm_load_insns((unsigned long)p->ainsn.insn, 4, buff + TMPL_INSN_IDX);
        }

        /*
         * 4. branch back from trampoline
         */
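        /* 'nip' is the post-emulation address computed by can_optimize() */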
        patch_branch(buff + TMPL_RET_IDX, nip, 0);

        flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));

        op->optinsn.insn = buff;

        return 0;

error:
        free_optinsn_slot(buff, 0);
        return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
        return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces a single instruction
 * (4 bytes long and 4-byte aligned). It is impossible to encounter
 * another kprobe in this address range, so always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
        return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
        struct ppc_inst instr;
        struct optimized_kprobe *op;
        struct optimized_kprobe *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                /*
                 * Back up the instruction that will be replaced
                 * by the branch to the detour buffer
                 */
                memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
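                /* Replace the probed instruction with an unconditional branch to the detour buffer */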
                create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
                patch_instruction(op->kp.addr, instr);
                list_del_init(&op->list);
        }
}

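/*
 * Unoptimizing simply re-arms the normal kprobe: the branch at the
 * probe address is replaced by a breakpoint again.
 */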
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
        arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
        struct optimized_kprobe *op;
        struct optimized_kprobe *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                arch_unoptimize_kprobe(op);
                list_move(&op->list, done_list);
        }
}

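/* Check whether 'addr' falls within the instruction replaced by this optprobe */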
int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
{
        return ((unsigned long)op->kp.addr <= addr &&
                (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}