Merge tag 'trace-user-events-v6.10' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] / arch / riscv / kernel / probes / kprobes.c
1 // SPDX-License-Identifier: GPL-2.0+
2
3 #define pr_fmt(fmt) "kprobes: " fmt
4
5 #include <linux/kprobes.h>
6 #include <linux/extable.h>
7 #include <linux/slab.h>
8 #include <linux/stop_machine.h>
9 #include <asm/ptrace.h>
10 #include <linux/uaccess.h>
11 #include <asm/sections.h>
12 #include <asm/cacheflush.h>
13 #include <asm/bug.h>
14 #include <asm/patch.h>
15
16 #include "decode-insn.h"
17
18 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
19 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
20
21 static void __kprobes
22 post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
23
/*
 * Prepare the out-of-line single-step slot: copy the probed instruction
 * into the slot, then place a 32-bit break instruction right after it so
 * the trap taken after stepping hands control back to the kprobes core.
 */
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	u32 insn = __BUG_INSN_32;
	unsigned long offset = GET_INSN_LENGTH(p->opcode);

	/* resume address in the original text, just past the probed insn */
	p->ainsn.api.restore = (unsigned long)p->addr + offset;

	/* slot[0] = original insn; slot[offset] = break insn */
	patch_text(p->ainsn.api.insn, &p->opcode, 1);
	patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
		   &insn, 1);
}
35
/*
 * Simulated instructions have no slot to return from; restore == 0 makes
 * post_kprobe_handler() skip the epc rewrite (the simulation is expected
 * to leave the PC in the right place itself).
 */
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	p->ainsn.api.restore = 0;
}
40
/*
 * Emulate the probed instruction in software (no out-of-line slot), then
 * run the common post-probe completion path.
 */
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/* handler was chosen by riscv_probe_decode_insn() at prepare time */
	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode,
					(unsigned long)p->addr, regs);

	post_kprobe_handler(p, kcb, regs);
}
51
52 static bool __kprobes arch_check_kprobe(struct kprobe *p)
53 {
54         unsigned long tmp  = (unsigned long)p->addr - p->offset;
55         unsigned long addr = (unsigned long)p->addr;
56
57         while (tmp <= addr) {
58                 if (tmp == addr)
59                         return true;
60
61                 tmp += GET_INSN_LENGTH(*(u16 *)tmp);
62         }
63
64         return false;
65 }
66
/*
 * Validate the probe address, save a copy of the original instruction and
 * decide between out-of-line single-stepping and software simulation.
 *
 * Returns 0 on success, -EILSEQ for a misaligned or mid-instruction
 * address, -EINVAL for an instruction that cannot be probed, -ENOMEM
 * when no single-step slot is available.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	u16 *insn = (u16 *)p->addr;

	/* instructions (incl. compressed) are at least 2-byte aligned */
	if ((unsigned long)insn & 0x1)
		return -EILSEQ;

	/* reject addresses that fall inside a preceding instruction */
	if (!arch_check_kprobe(p))
		return -EILSEQ;

	/* copy instruction: low halfword, plus high halfword if 32-bit */
	p->opcode = (kprobe_opcode_t)(*insn++);
	if (GET_INSN_LENGTH(p->opcode) == 4)
		p->opcode |= (kprobe_opcode_t)(*insn) << 16;

	/* decode instruction */
	switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn need simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
106
107 /* install breakpoint in text */
108 void __kprobes arch_arm_kprobe(struct kprobe *p)
109 {
110         u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ?
111                    __BUG_INSN_32 : __BUG_INSN_16;
112
113         patch_text(p->addr, &insn, 1);
114 }
115
/* remove breakpoint from text: write back the saved original instruction */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, &p->opcode, 1);
}
121
122 void __kprobes arch_remove_kprobe(struct kprobe *p)
123 {
124 }
125
/* stash the active kprobe and its status so a reentrant hit can nest */
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
131
/* pop the kprobe/status saved by save_previous_kprobe() */
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
137
/* mark @p as the kprobe currently being handled on this CPU */
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
142
/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupt on local CPU, there is a chance of
 * interrupt occurrence in the period of exception return and  start of
 * out-of-line single-step, that result in wrongly single stepping
 * into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	/* save the status CSR, then clear SPIE so IRQs stay off on sret */
	kcb->saved_status = regs->status;
	regs->status &= ~SR_SPIE;
}
157
/* undo kprobes_save_local_irqflag(): restore the saved status CSR */
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->status = kcb->saved_status;
}
163
/*
 * Execute the probed instruction: either redirect the PC into the
 * out-of-line slot (the break patched after it re-enters kprobes), or
 * simulate the instruction in software.
 *
 * @reenter: non-zero when this probe hit while another kprobe was
 *           already being handled; the previous one is saved so it can
 *           be restored in post_kprobe_handler().
 */
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);

		/* resume in the slot; break there returns to kprobes */
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
191
/*
 * Handle a kprobe hit while another kprobe is already being processed
 * on this CPU (e.g. a probe inside a kprobe handler).
 *
 * Returns 1 when the reentrant hit was consumed, 0 on an unexpected
 * status (WARNs and lets the caller treat it as handled anyway).
 */
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		/* legitimate nesting: account the miss and step this probe */
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/* a probe fired inside the single-step slot: unrecoverable */
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
215
/*
 * Common completion path after the probed instruction has been executed
 * (single-stepped in the slot or simulated): fix up the PC, unwind any
 * reentrancy, and invoke the user's post handler.
 */
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* return addr restore if non-branching insn */
	if (cur->ainsn.api.restore != 0)
		regs->epc = cur->ainsn.api.restore;

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)	{
		/* post_handler can hit breakpoint and single step
		 * again, so we enable D-flag for recursive exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}
240
/*
 * Called from the fault path when a fault is taken while a kprobe is
 * active on this CPU.  Returns 1 if the fault was handled here (the
 * normal fault path should back off), 0 otherwise.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->epc = (unsigned long) cur->addr;
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else {
			/* leaving single-step: re-enable saved IRQ state */
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}
278
/*
 * Entry point from the break-instruction trap: decide whether the break
 * at the faulting address belongs to a kprobe and, if so, run its
 * pre-handler and start single-stepping/simulation.
 *
 * Returns true when the trap was consumed by kprobes, false when it was
 * not ours (e.g. the breakpoint was already removed by another CPU).
 */
bool __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			/* nested hit while another probe is in flight */
			if (reenter_kprobe(p, regs, kcb))
				return true;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path and no need to single
			 * stepping. Let's just reset current kprobe and exit.
			 *
			 * pre_handler can hit a breakpoint and can step thru
			 * before return.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return true;
	}

	/*
	 * The breakpoint instruction was removed right
	 * after we hit it.  Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address.  In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
	return false;
}
328
/*
 * Entry point for the break taken at the end of the single-step slot:
 * if the PC sits exactly one instruction past the start of the current
 * kprobe's slot, the step completed — restore IRQ state and finish via
 * post_kprobe_handler().  Returns false when the trap is not ours.
 */
bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);
		return true;
	}
	/* not ours, kprobes should ignore it */
	return false;
}
345
346 /*
347  * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
348  * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
349  */
350 int __init arch_populate_kprobe_blacklist(void)
351 {
352         int ret;
353
354         ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
355                                         (unsigned long)__irqentry_text_end);
356         return ret;
357 }
358
/* no arch-specific trampoline on riscv: @p is never a trampoline probe */
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
363
/* no arch-specific initialization needed; always succeeds */
int __init arch_init_kprobes(void)
{
	return 0;
}