arch/riscv/kernel/probes/kprobes.c
// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/bug.h>
#include <asm/patch.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

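/*
 * Prepare the out-of-line single-step slot: copy the probed instruction
 * into the slot and place a 32-bit ebreak right after it, so execution
 * traps back to the kernel once the copied instruction has run.
 */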
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	u32 insn = __BUG_INSN_32;
	unsigned long offset = GET_INSN_LENGTH(p->opcode);

	p->ainsn.api.restore = (unsigned long)p->addr + offset;

	patch_text(p->ainsn.api.insn, &p->opcode, 1);
	patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
		   &insn, 1);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode,
					(unsigned long)p->addr, regs);

	post_kprobe_handler(p, kcb, regs);
}

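/*
 * Walk forward from the start of the probed symbol, instruction by
 * instruction, to verify that the requested probe address falls on an
 * instruction boundary and not in the middle of a 32-bit instruction.
 */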
static bool __kprobes arch_check_kprobe(struct kprobe *p)
{
	unsigned long tmp  = (unsigned long)p->addr - p->offset;
	unsigned long addr = (unsigned long)p->addr;

	while (tmp <= addr) {
		if (tmp == addr)
			return true;

		tmp += GET_INSN_LENGTH(*(u16 *)tmp);
	}

	return false;
}

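/*
 * Validate and prepare a probe: reject misaligned or mid-instruction
 * addresses, copy the original instruction, then decide, based on the
 * decoder's verdict, whether it can be single stepped out of line or
 * has to be simulated.
 */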
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	u16 *insn = (u16 *)p->addr;

	if ((unsigned long)insn & 0x1)
		return -EILSEQ;

	if (!arch_check_kprobe(p))
		return -EILSEQ;

	/* copy instruction */
	p->opcode = (kprobe_opcode_t)(*insn++);
	if (GET_INSN_LENGTH(p->opcode) == 4)
		p->opcode |= (kprobe_opcode_t)(*insn) << 16;

	/* decode instruction */
	switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

#ifdef CONFIG_MMU
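/*
 * Allocate the page backing the out-of-line instruction slots as an
 * executable vmalloc mapping; VM_FLUSH_RESET_PERMS restores the
 * direct-map permissions when the page is eventually freed.
 */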
void *alloc_insn_page(void)
{
	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL_READ_EXEC,
				    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
#endif

/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ?
		   __BUG_INSN_32 : __BUG_INSN_16;

	patch_text(p->addr, &insn, 1);
}

/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Interrupts need to be disabled before single stepping is set up and
 * must not be re-enabled until it has finished. If interrupts were left
 * enabled on the local CPU, one could arrive between the exception
 * return and the start of the out-of-line single step, and we would
 * wrongly single-step into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_status = regs->status;
	regs->status &= ~SR_SPIE;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->status = kcb->saved_status;
}

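/*
 * Start handling the probed instruction: either redirect the pc to the
 * out-of-line slot (the ebreak following the copied instruction traps
 * back into kprobe_single_step_handler()), or simulate the instruction
 * directly when it cannot be executed out of line.
 */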
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);

		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

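/*
 * A breakpoint was hit while another kprobe is already being handled.
 * Recursing from a handler is tolerated (the hit is counted as missed
 * and the instruction is still executed), but recursing while single
 * stepping means something has gone badly wrong, so give up.
 */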
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

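/*
 * Finish handling a probe after its instruction has been executed out of
 * line (or simulated): point the pc at the instruction following the
 * probe when a restore address was recorded, restore the previous kprobe
 * state for a re-entered probe, otherwise run the post handler and clear
 * the current kprobe.
 */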
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* restore the return address for non-branching instructions */
	if (cur->ainsn.api.restore != 0)
		regs->epc = cur->ainsn.api.restore;

	/* restore the original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call the post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * The post_handler itself can hit a breakpoint and single
		 * step again, so the state set above allows that recursion.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

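/*
 * Called from the page fault path when a fault is taken while a kprobe
 * is active: decide whether the fault belongs to the probe machinery
 * (e.g. the single-stepped instruction faulted) and how to recover.
 */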
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. Reset the current kprobe,
		 * point the pc back at the probe address and let the
		 * page fault handler continue as for a normal page fault.
		 */
		regs->epc = (unsigned long) cur->addr;
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * The fault happened while a kprobe handler was running;
		 * if an exception table fixup exists for the faulting
		 * instruction, apply it.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

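/*
 * ebreak trap entry point: called from the trap handler when a
 * breakpoint exception may have been raised by a kprobe. Dispatches to
 * the matching kprobe, handles re-entry, or reports that the breakpoint
 * is not ours.
 */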
bool __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return true;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it has
			 * already modified the execution path, so there is
			 * no need to single step; just reset the current
			 * kprobe and exit.
			 *
			 * The pre_handler itself can hit a breakpoint and
			 * single step through it before returning.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return true;
	}

	/*
	 * The breakpoint instruction was removed right after we hit it.
	 * Another cpu has removed either a probepoint or a debugger
	 * breakpoint at this address. In either case, no further handling
	 * of this trap is appropriate: return to the original instruction
	 * and continue.
	 */
	return false;
}

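/*
 * ebreak trap taken at the end of the single-step slot: if the trap
 * address matches the slot of the kprobe being handled, restore the
 * saved irq state and complete the probe via post_kprobe_handler().
 */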
bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);
		return true;
	}
	/* not ours, kprobes should ignore it */
	return false;
}

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	return ret;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}