1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Kernel Probes (KProbes)
4  *  kernel/kprobes.c
5  *
6  * Copyright (C) IBM Corporation, 2002, 2004
7  *
8  * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
9  *              Probes initial implementation (includes suggestions from
10  *              Rusty Russell).
11  * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
12  *              hlists and exceptions notifier as suggested by Andi Kleen.
13  * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
14  *              interface to access function arguments.
15  * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
16  *              exceptions notifier to be first on the priority list.
17  * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
18  *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
19  *              <prasanna@in.ibm.com> added function-return probes.
20  */
21 #include <linux/kprobes.h>
22 #include <linux/hash.h>
23 #include <linux/init.h>
24 #include <linux/slab.h>
25 #include <linux/stddef.h>
26 #include <linux/export.h>
27 #include <linux/moduleloader.h>
28 #include <linux/kallsyms.h>
29 #include <linux/freezer.h>
30 #include <linux/seq_file.h>
31 #include <linux/debugfs.h>
32 #include <linux/sysctl.h>
33 #include <linux/kdebug.h>
34 #include <linux/memory.h>
35 #include <linux/ftrace.h>
36 #include <linux/cpu.h>
37 #include <linux/jump_label.h>
38 #include <linux/static_call.h>
39 #include <linux/perf_event.h>
40
41 #include <asm/sections.h>
42 #include <asm/cacheflush.h>
43 #include <asm/errno.h>
44 #include <linux/uaccess.h>
45
46 #define KPROBE_HASH_BITS 6
47 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
48
49
50 static int kprobes_initialized;
51 /* kprobe_table can be accessed by
52  * - Normal hlist traversal and RCU add/del while kprobe_mutex is held.
53  * Or
54  * - RCU hlist traversal with preemption disabled (breakpoint handlers)
55  */
56 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
57
58 /* NOTE: change this value only with kprobe_mutex held */
59 static bool kprobes_all_disarmed;
60
61 /* This protects kprobe_table and optimizing_list */
62 static DEFINE_MUTEX(kprobe_mutex);
63 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
64
65 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
66                                         unsigned int __unused)
67 {
68         return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
69 }
70
71 /* Blacklist -- list of struct kprobe_blacklist_entry */
72 static LIST_HEAD(kprobe_blacklist);
73
74 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
75 /*
76  * kprobe->ainsn.insn points to the copy of the instruction to be
77  * single-stepped. x86_64, POWER4 and above have no-exec support and
78  * stepping on the instruction on a vmalloced/kmalloced/data page
79  * is a recipe for disaster
80  */
81 struct kprobe_insn_page {
82         struct list_head list;
83         kprobe_opcode_t *insns;         /* Page of instruction slots */
84         struct kprobe_insn_cache *cache;
85         int nused;
86         int ngarbage;
87         char slot_used[];
88 };
89
90 #define KPROBE_INSN_PAGE_SIZE(slots)                    \
91         (offsetof(struct kprobe_insn_page, slot_used) + \
92          (sizeof(char) * (slots)))
93
94 static int slots_per_page(struct kprobe_insn_cache *c)
95 {
96         return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
97 }
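
/*
 * Worked example (the values below are illustrative assumptions, not fixed
 * by this file): with PAGE_SIZE = 4096 and a slot of c->insn_size = 16
 * one-byte opcodes, each slot occupies 16 bytes, so slots_per_page() yields
 * 4096 / 16 = 256 slots, and KPROBE_INSN_PAGE_SIZE(256) sizes the
 * bookkeeping struct plus one slot_used byte per slot.
 */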
98
99 enum kprobe_slot_state {
100         SLOT_CLEAN = 0,
101         SLOT_DIRTY = 1,
102         SLOT_USED = 2,
103 };
104
105 void __weak *alloc_insn_page(void)
106 {
107         return module_alloc(PAGE_SIZE);
108 }
109
110 static void free_insn_page(void *page)
111 {
112         module_memfree(page);
113 }
114
115 struct kprobe_insn_cache kprobe_insn_slots = {
116         .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
117         .alloc = alloc_insn_page,
118         .free = free_insn_page,
119         .sym = KPROBE_INSN_PAGE_SYM,
120         .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
121         .insn_size = MAX_INSN_SIZE,
122         .nr_garbage = 0,
123 };
124 static int collect_garbage_slots(struct kprobe_insn_cache *c);
125
126 /**
127  * __get_insn_slot() - Find a slot on an executable page for an instruction.
128  * We allocate an executable page if there's no room on existing ones.
129  */
130 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
131 {
132         struct kprobe_insn_page *kip;
133         kprobe_opcode_t *slot = NULL;
134
135         /* Since the slot array is not protected by rcu, we need a mutex */
136         mutex_lock(&c->mutex);
137  retry:
138         rcu_read_lock();
139         list_for_each_entry_rcu(kip, &c->pages, list) {
140                 if (kip->nused < slots_per_page(c)) {
141                         int i;
142                         for (i = 0; i < slots_per_page(c); i++) {
143                                 if (kip->slot_used[i] == SLOT_CLEAN) {
144                                         kip->slot_used[i] = SLOT_USED;
145                                         kip->nused++;
146                                         slot = kip->insns + (i * c->insn_size);
147                                         rcu_read_unlock();
148                                         goto out;
149                                 }
150                         }
151                         /* kip->nused is broken. Fix it. */
152                         kip->nused = slots_per_page(c);
153                         WARN_ON(1);
154                 }
155         }
156         rcu_read_unlock();
157
158         /* If there are any garbage slots, collect them and try again. */
159         if (c->nr_garbage && collect_garbage_slots(c) == 0)
160                 goto retry;
161
162         /* All out of space.  Need to allocate a new page. */
163         kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
164         if (!kip)
165                 goto out;
166
167         /*
168          * Use module_alloc so this page is within +/- 2GB of where the
169          * kernel image and loaded module images reside. This is required
170          * so x86_64 can correctly handle the %rip-relative fixups.
171          */
172         kip->insns = c->alloc();
173         if (!kip->insns) {
174                 kfree(kip);
175                 goto out;
176         }
177         INIT_LIST_HEAD(&kip->list);
178         memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
179         kip->slot_used[0] = SLOT_USED;
180         kip->nused = 1;
181         kip->ngarbage = 0;
182         kip->cache = c;
183         list_add_rcu(&kip->list, &c->pages);
184         slot = kip->insns;
185
186         /* Record the perf ksymbol register event after adding the page */
187         perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
188                            PAGE_SIZE, false, c->sym);
189 out:
190         mutex_unlock(&c->mutex);
191         return slot;
192 }
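
/*
 * Usage sketch (illustrative only): arch code normally takes and releases
 * slots through the get_insn_slot()/free_insn_slot() wrappers from
 * <linux/kprobes.h>, which supply kprobe_insn_slots as the cache:
 *
 *	kprobe_opcode_t *slot = get_insn_slot();
 *
 *	if (slot) {
 *		... copy the probed instruction into the slot ...
 *		free_insn_slot(slot, 0);	<- 0 means the slot is clean
 *	}
 */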
193
194 /* Return 1 if all garbage slots are collected, otherwise 0. */
195 static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
196 {
197         kip->slot_used[idx] = SLOT_CLEAN;
198         kip->nused--;
199         if (kip->nused == 0) {
200                 /*
201                  * Page is no longer in use.  Free it unless
202                  * it's the last one.  We keep the last one
203                  * so as not to have to set it up again the
204                  * next time somebody inserts a probe.
205                  */
206                 if (!list_is_singular(&kip->list)) {
207                         /*
208                          * Record perf ksymbol unregister event before removing
209                          * the page.
210                          */
211                         perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
212                                            (unsigned long)kip->insns, PAGE_SIZE, true,
213                                            kip->cache->sym);
214                         list_del_rcu(&kip->list);
215                         synchronize_rcu();
216                         kip->cache->free(kip->insns);
217                         kfree(kip);
218                 }
219                 return 1;
220         }
221         return 0;
222 }
223
224 static int collect_garbage_slots(struct kprobe_insn_cache *c)
225 {
226         struct kprobe_insn_page *kip, *next;
227
228         /* Ensure no one is still running on the garbage slots */
229         synchronize_rcu();
230
231         list_for_each_entry_safe(kip, next, &c->pages, list) {
232                 int i;
233                 if (kip->ngarbage == 0)
234                         continue;
235                 kip->ngarbage = 0;      /* we will collect all garbage slots */
236                 for (i = 0; i < slots_per_page(c); i++) {
237                         if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
238                                 break;
239                 }
240         }
241         c->nr_garbage = 0;
242         return 0;
243 }
244
245 void __free_insn_slot(struct kprobe_insn_cache *c,
246                       kprobe_opcode_t *slot, int dirty)
247 {
248         struct kprobe_insn_page *kip;
249         long idx;
250
251         mutex_lock(&c->mutex);
252         rcu_read_lock();
253         list_for_each_entry_rcu(kip, &c->pages, list) {
254                 idx = ((long)slot - (long)kip->insns) /
255                         (c->insn_size * sizeof(kprobe_opcode_t));
256                 if (idx >= 0 && idx < slots_per_page(c))
257                         goto out;
258         }
259         /* Could not find this slot. */
260         WARN_ON(1);
261         kip = NULL;
262 out:
263         rcu_read_unlock();
264         /* Mark and sweep: this may sleep */
265         if (kip) {
266                 /* Check double free */
267                 WARN_ON(kip->slot_used[idx] != SLOT_USED);
268                 if (dirty) {
269                         kip->slot_used[idx] = SLOT_DIRTY;
270                         kip->ngarbage++;
271                         if (++c->nr_garbage > slots_per_page(c))
272                                 collect_garbage_slots(c);
273                 } else {
274                         collect_one_slot(kip, idx);
275                 }
276         }
277         mutex_unlock(&c->mutex);
278 }
279
280 /*
281  * Check whether the given address is on a page of kprobe instruction slots.
282  * This is used to check whether an address found on a stack
283  * is in a text area or not.
284  */
285 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
286 {
287         struct kprobe_insn_page *kip;
288         bool ret = false;
289
290         rcu_read_lock();
291         list_for_each_entry_rcu(kip, &c->pages, list) {
292                 if (addr >= (unsigned long)kip->insns &&
293                     addr < (unsigned long)kip->insns + PAGE_SIZE) {
294                         ret = true;
295                         break;
296                 }
297         }
298         rcu_read_unlock();
299
300         return ret;
301 }
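
/*
 * Usage sketch: callers normally go through the is_kprobe_insn_slot() /
 * is_kprobe_optinsn_slot() wrappers from <linux/kprobes.h>, e.g. when
 * deciding whether a stack address lies in kernel text:
 *
 *	if (is_kprobe_insn_slot(addr))
 *		... treat 'addr' as text ...
 */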
302
303 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
304                              unsigned long *value, char *type, char *sym)
305 {
306         struct kprobe_insn_page *kip;
307         int ret = -ERANGE;
308
309         rcu_read_lock();
310         list_for_each_entry_rcu(kip, &c->pages, list) {
311                 if ((*symnum)--)
312                         continue;
313                 strlcpy(sym, c->sym, KSYM_NAME_LEN);
314                 *type = 't';
315                 *value = (unsigned long)kip->insns;
316                 ret = 0;
317                 break;
318         }
319         rcu_read_unlock();
320
321         return ret;
322 }
323
324 #ifdef CONFIG_OPTPROBES
325 void __weak *alloc_optinsn_page(void)
326 {
327         return alloc_insn_page();
328 }
329
330 void __weak free_optinsn_page(void *page)
331 {
332         free_insn_page(page);
333 }
334
335 /* For optimized_kprobe buffer */
336 struct kprobe_insn_cache kprobe_optinsn_slots = {
337         .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
338         .alloc = alloc_optinsn_page,
339         .free = free_optinsn_page,
340         .sym = KPROBE_OPTINSN_PAGE_SYM,
341         .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
342         /* .insn_size is initialized later */
343         .nr_garbage = 0,
344 };
345 #endif
346 #endif
347
348 /* We have preemption disabled, so it is safe to use the '__' versions */
349 static inline void set_kprobe_instance(struct kprobe *kp)
350 {
351         __this_cpu_write(kprobe_instance, kp);
352 }
353
354 static inline void reset_kprobe_instance(void)
355 {
356         __this_cpu_write(kprobe_instance, NULL);
357 }
358
359 /*
360  * This routine is called either:
361  *      - under the kprobe_mutex - during kprobe_[un]register()
362  *                              OR
363  *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
364  */
365 struct kprobe *get_kprobe(void *addr)
366 {
367         struct hlist_head *head;
368         struct kprobe *p;
369
370         head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
371         hlist_for_each_entry_rcu(p, head, hlist,
372                                  lockdep_is_held(&kprobe_mutex)) {
373                 if (p->addr == addr)
374                         return p;
375         }
376
377         return NULL;
378 }
379 NOKPROBE_SYMBOL(get_kprobe);
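
/*
 * Lookup sketch (illustrative): a breakpoint handler resolves the hit
 * address with preemption already disabled, e.g.
 *
 *	struct kprobe *p = get_kprobe((void *)instruction_pointer(regs));
 *
 *	if (p)
 *		... dispatch to p->pre_handler ...
 */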
380
381 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
382
383 /* Return true if the kprobe is an aggregator */
384 static inline int kprobe_aggrprobe(struct kprobe *p)
385 {
386         return p->pre_handler == aggr_pre_handler;
387 }
388
389 /* Return true(!0) if the kprobe is unused */
390 static inline int kprobe_unused(struct kprobe *p)
391 {
392         return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
393                list_empty(&p->list);
394 }
395
396 /*
397  * Keep all fields in the kprobe consistent
398  */
399 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
400 {
401         memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
402         memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
403 }
404
405 #ifdef CONFIG_OPTPROBES
406 /* NOTE: change this value only with kprobe_mutex held */
407 static bool kprobes_allow_optimization;
408
409 /*
410  * Call all of the pre_handlers on the list, but ignore their return values.
411  * This must be called from the arch-dependent optimized caller.
412  */
413 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
414 {
415         struct kprobe *kp;
416
417         list_for_each_entry_rcu(kp, &p->list, list) {
418                 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
419                         set_kprobe_instance(kp);
420                         kp->pre_handler(kp, regs);
421                 }
422                 reset_kprobe_instance();
423         }
424 }
425 NOKPROBE_SYMBOL(opt_pre_handler);
426
427 /* Free optimized instructions and optimized_kprobe */
428 static void free_aggr_kprobe(struct kprobe *p)
429 {
430         struct optimized_kprobe *op;
431
432         op = container_of(p, struct optimized_kprobe, kp);
433         arch_remove_optimized_kprobe(op);
434         arch_remove_kprobe(p);
435         kfree(op);
436 }
437
438 /* Return true(!0) if the kprobe is ready for optimization. */
439 static inline int kprobe_optready(struct kprobe *p)
440 {
441         struct optimized_kprobe *op;
442
443         if (kprobe_aggrprobe(p)) {
444                 op = container_of(p, struct optimized_kprobe, kp);
445                 return arch_prepared_optinsn(&op->optinsn);
446         }
447
448         return 0;
449 }
450
451 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
452 static inline int kprobe_disarmed(struct kprobe *p)
453 {
454         struct optimized_kprobe *op;
455
456         /* If it is not an aggr/opt probe, just return whether the kprobe is disabled */
457         if (!kprobe_aggrprobe(p))
458                 return kprobe_disabled(p);
459
460         op = container_of(p, struct optimized_kprobe, kp);
461
462         return kprobe_disabled(p) && list_empty(&op->list);
463 }
464
465 /* Return true(!0) if the probe is queued on (un)optimizing lists */
466 static int kprobe_queued(struct kprobe *p)
467 {
468         struct optimized_kprobe *op;
469
470         if (kprobe_aggrprobe(p)) {
471                 op = container_of(p, struct optimized_kprobe, kp);
472                 if (!list_empty(&op->list))
473                         return 1;
474         }
475         return 0;
476 }
477
478 /*
479  * Return an optimized kprobe whose optimizing code replaces
480  * instructions including 'addr' (excluding the breakpoint itself).
481  */
482 static struct kprobe *get_optimized_kprobe(unsigned long addr)
483 {
484         int i;
485         struct kprobe *p = NULL;
486         struct optimized_kprobe *op;
487
488         /* Don't check i == 0, since that is a breakpoint case. */
489         for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
490                 p = get_kprobe((void *)(addr - i));
491
492         if (p && kprobe_optready(p)) {
493                 op = container_of(p, struct optimized_kprobe, kp);
494                 if (arch_within_optimized_kprobe(op, addr))
495                         return p;
496         }
497
498         return NULL;
499 }
500
501 /* Optimization staging list, protected by kprobe_mutex */
502 static LIST_HEAD(optimizing_list);
503 static LIST_HEAD(unoptimizing_list);
504 static LIST_HEAD(freeing_list);
505
506 static void kprobe_optimizer(struct work_struct *work);
507 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
508 #define OPTIMIZE_DELAY 5
509
510 /*
511  * Optimize (replace a breakpoint with a jump) kprobes listed on
512  * optimizing_list.
513  */
514 static void do_optimize_kprobes(void)
515 {
516         lockdep_assert_held(&text_mutex);
517         /*
518          * Optimization and unoptimization refer to online_cpus via
519          * stop_machine(), while cpu-hotplug modifies online_cpus. At the
520          * same time, text_mutex is held both here and during cpu-hotplug.
521          * This combination can cause a deadlock (cpu-hotplug tries to lock
522          * text_mutex but stop_machine() cannot finish because online_cpus
523          * has changed in the meantime).
524          * To avoid this deadlock, the caller must hold the cpu-hotplug lock,
525          * preventing cpu-hotplug from running outside of the text_mutex locking.
526          */
527         lockdep_assert_cpus_held();
528
529         /* Optimization is never done while kprobes are all disarmed */
530         if (kprobes_all_disarmed || !kprobes_allow_optimization ||
531             list_empty(&optimizing_list))
532                 return;
533
534         arch_optimize_kprobes(&optimizing_list);
535 }
536
537 /*
538  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
539  * if need) kprobes listed on unoptimizing_list.
540  */
541 static void do_unoptimize_kprobes(void)
542 {
543         struct optimized_kprobe *op, *tmp;
544
545         lockdep_assert_held(&text_mutex);
546         /* See comment in do_optimize_kprobes() */
547         lockdep_assert_cpus_held();
548
549         /* Unoptimization must be done even when optimization is disallowed */
550         if (list_empty(&unoptimizing_list))
551                 return;
552
553         arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
554         /* Loop over freeing_list for disarming */
555         list_for_each_entry_safe(op, tmp, &freeing_list, list) {
556                 /* Switch back from the detour code to the original */
557                 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
558                 /* Disarm probes if marked disabled */
559                 if (kprobe_disabled(&op->kp))
560                         arch_disarm_kprobe(&op->kp);
561                 if (kprobe_unused(&op->kp)) {
562                         /*
563                          * Remove unused probes from the hash list. After waiting
564                          * for synchronization, these probes are reclaimed
565                          * (reclaiming is done by do_free_cleaned_kprobes()).
566                          */
567                         hlist_del_rcu(&op->kp.hlist);
568                 } else
569                         list_del_init(&op->list);
570         }
571 }
572
573 /* Reclaim all kprobes on the freeing_list */
574 static void do_free_cleaned_kprobes(void)
575 {
576         struct optimized_kprobe *op, *tmp;
577
578         list_for_each_entry_safe(op, tmp, &freeing_list, list) {
579                 list_del_init(&op->list);
580                 if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
581                         /*
582                          * This must not happen, but if there is a kprobe
583                          * still in use, keep it on kprobes hash list.
584                          */
585                         continue;
586                 }
587                 free_aggr_kprobe(&op->kp);
588         }
589 }
590
591 /* Start the optimizer after OPTIMIZE_DELAY jiffies have passed */
592 static void kick_kprobe_optimizer(void)
593 {
594         schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
595 }
596
597 /* Kprobe jump optimizer */
598 static void kprobe_optimizer(struct work_struct *work)
599 {
600         mutex_lock(&kprobe_mutex);
601         cpus_read_lock();
602         mutex_lock(&text_mutex);
603
604         /*
605          * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
606          * kprobes before waiting for the quiescence period.
607          */
608         do_unoptimize_kprobes();
609
610         /*
611          * Step 2: Wait for the quiescence period to ensure all potentially
612          * preempted tasks have been scheduled normally. Because an optprobe
613          * may modify multiple instructions, there is a chance that the Nth
614          * instruction is preempted. In that case, such tasks can return
615          * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
616          * Note that on a non-preemptive kernel, this is transparently converted
617          * to synchronize_sched() to wait for all interrupts to have completed.
618          */
619         synchronize_rcu_tasks();
620
621         /* Step 3: Optimize kprobes after the quiescence period */
622         do_optimize_kprobes();
623
624         /* Step 4: Free cleaned kprobes after the quiescence period */
625         do_free_cleaned_kprobes();
626
627         mutex_unlock(&text_mutex);
628         cpus_read_unlock();
629
630         /* Step 5: Kick optimizer again if needed */
631         if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
632                 kick_kprobe_optimizer();
633
634         mutex_unlock(&kprobe_mutex);
635 }
636
637 /* Wait for optimization and unoptimization to complete */
638 void wait_for_kprobe_optimizer(void)
639 {
640         mutex_lock(&kprobe_mutex);
641
642         while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
643                 mutex_unlock(&kprobe_mutex);
644
645                 /* this will also make optimizing_work execute immediately */
646                 flush_delayed_work(&optimizing_work);
647                 /* @optimizing_work might not have been queued yet, relax */
648                 cpu_relax();
649
650                 mutex_lock(&kprobe_mutex);
651         }
652
653         mutex_unlock(&kprobe_mutex);
654 }
655
656 static bool optprobe_queued_unopt(struct optimized_kprobe *op)
657 {
658         struct optimized_kprobe *_op;
659
660         list_for_each_entry(_op, &unoptimizing_list, list) {
661                 if (op == _op)
662                         return true;
663         }
664
665         return false;
666 }
667
668 /* Optimize kprobe if p is ready to be optimized */
669 static void optimize_kprobe(struct kprobe *p)
670 {
671         struct optimized_kprobe *op;
672
673         /* Check if the kprobe is disabled or not ready for optimization. */
674         if (!kprobe_optready(p) || !kprobes_allow_optimization ||
675             (kprobe_disabled(p) || kprobes_all_disarmed))
676                 return;
677
678         /* Kprobes with a post_handler cannot be optimized */
679         if (p->post_handler)
680                 return;
681
682         op = container_of(p, struct optimized_kprobe, kp);
683
684         /* Check that there are no other kprobes on the optimized instructions */
685         if (arch_check_optimized_kprobe(op) < 0)
686                 return;
687
688         /* Check if it is already optimized. */
689         if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
690                 if (optprobe_queued_unopt(op)) {
691                         /* It is being unoptimized. Just dequeue the probe */
692                         list_del_init(&op->list);
693                 }
694                 return;
695         }
696         op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
697
698         /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
699         if (WARN_ON_ONCE(!list_empty(&op->list)))
700                 return;
701
702         list_add(&op->list, &optimizing_list);
703         kick_kprobe_optimizer();
704 }
705
706 /* Shortcut to direct unoptimization */
707 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
708 {
709         lockdep_assert_cpus_held();
710         arch_unoptimize_kprobe(op);
711         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
712 }
713
714 /* Unoptimize a kprobe if p is optimized */
715 static void unoptimize_kprobe(struct kprobe *p, bool force)
716 {
717         struct optimized_kprobe *op;
718
719         if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
720                 return; /* This is not an optprobe, nor is it optimized */
721
722         op = container_of(p, struct optimized_kprobe, kp);
723         if (!kprobe_optimized(p))
724                 return;
725
726         if (!list_empty(&op->list)) {
727                 if (optprobe_queued_unopt(op)) {
728                         /* Queued in unoptimizing queue */
729                         if (force) {
730                                 /*
731                                  * Forcibly unoptimize the kprobe here, and queue it
732                                  * in the freeing list for release afterwards.
733                                  */
734                                 force_unoptimize_kprobe(op);
735                                 list_move(&op->list, &freeing_list);
736                         }
737                 } else {
738                         /* Dequeue from the optimizing queue */
739                         list_del_init(&op->list);
740                         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
741                 }
742                 return;
743         }
744
745         /* Optimized kprobe case */
746         if (force) {
747                 /* Forcibly update the code: this is a special case */
748                 force_unoptimize_kprobe(op);
749         } else {
750                 list_add(&op->list, &unoptimizing_list);
751                 kick_kprobe_optimizer();
752         }
753 }
754
755 /* Cancel unoptimization for reuse */
756 static int reuse_unused_kprobe(struct kprobe *ap)
757 {
758         struct optimized_kprobe *op;
759
760         /*
761          * An unused kprobe MUST be in the middle of delayed unoptimizing
762          * (meaning there is still a relative jump in place) and disabled.
763          */
764         op = container_of(ap, struct optimized_kprobe, kp);
765         WARN_ON_ONCE(list_empty(&op->list));
766         /* Enable the probe again */
767         ap->flags &= ~KPROBE_FLAG_DISABLED;
768         /* Optimize it again (remove from op->list) */
769         if (!kprobe_optready(ap))
770                 return -EINVAL;
771
772         optimize_kprobe(ap);
773         return 0;
774 }
775
776 /* Remove optimized instructions */
777 static void kill_optimized_kprobe(struct kprobe *p)
778 {
779         struct optimized_kprobe *op;
780
781         op = container_of(p, struct optimized_kprobe, kp);
782         if (!list_empty(&op->list))
783                 /* Dequeue from the (un)optimization queue */
784                 list_del_init(&op->list);
785         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
786
787         if (kprobe_unused(p)) {
788                 /* Enqueue if it is unused */
789                 list_add(&op->list, &freeing_list);
790                 /*
791                  * Remove unused probes from the hash list. After waiting
792                  * for synchronization, this probe is reclaimed.
793                  * (reclaiming is done by do_free_cleaned_kprobes().)
794                  */
795                 hlist_del_rcu(&op->kp.hlist);
796         }
797
798         /* Don't touch the code, because it is already freed. */
799         arch_remove_optimized_kprobe(op);
800 }
801
802 static inline
803 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
804 {
805         if (!kprobe_ftrace(p))
806                 arch_prepare_optimized_kprobe(op, p);
807 }
808
809 /* Try to prepare optimized instructions */
810 static void prepare_optimized_kprobe(struct kprobe *p)
811 {
812         struct optimized_kprobe *op;
813
814         op = container_of(p, struct optimized_kprobe, kp);
815         __prepare_optimized_kprobe(op, p);
816 }
817
818 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
819 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
820 {
821         struct optimized_kprobe *op;
822
823         op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
824         if (!op)
825                 return NULL;
826
827         INIT_LIST_HEAD(&op->list);
828         op->kp.addr = p->addr;
829         __prepare_optimized_kprobe(op, p);
830
831         return &op->kp;
832 }
833
834 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
835
836 /*
837  * Prepare an optimized_kprobe and optimize it
838  * NOTE: p must be a normal registered kprobe
839  */
840 static void try_to_optimize_kprobe(struct kprobe *p)
841 {
842         struct kprobe *ap;
843         struct optimized_kprobe *op;
844
845         /* It is impossible to optimize an ftrace-based kprobe */
846         if (kprobe_ftrace(p))
847                 return;
848
849         /* For preparing optimization, jump_label_text_reserved() is called */
850         cpus_read_lock();
851         jump_label_lock();
852         mutex_lock(&text_mutex);
853
854         ap = alloc_aggr_kprobe(p);
855         if (!ap)
856                 goto out;
857
858         op = container_of(ap, struct optimized_kprobe, kp);
859         if (!arch_prepared_optinsn(&op->optinsn)) {
860                 /* If preparing the optimization failed, fall back to a plain kprobe */
861                 arch_remove_optimized_kprobe(op);
862                 kfree(op);
863                 goto out;
864         }
865
866         init_aggr_kprobe(ap, p);
867         optimize_kprobe(ap);    /* This just kicks the optimizer work */
868
869 out:
870         mutex_unlock(&text_mutex);
871         jump_label_unlock();
872         cpus_read_unlock();
873 }
874
875 static void optimize_all_kprobes(void)
876 {
877         struct hlist_head *head;
878         struct kprobe *p;
879         unsigned int i;
880
881         mutex_lock(&kprobe_mutex);
882         /* If optimization is already allowed, just return */
883         if (kprobes_allow_optimization)
884                 goto out;
885
886         cpus_read_lock();
887         kprobes_allow_optimization = true;
888         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
889                 head = &kprobe_table[i];
890                 hlist_for_each_entry(p, head, hlist)
891                         if (!kprobe_disabled(p))
892                                 optimize_kprobe(p);
893         }
894         cpus_read_unlock();
895         printk(KERN_INFO "Kprobes globally optimized\n");
896 out:
897         mutex_unlock(&kprobe_mutex);
898 }
899
900 #ifdef CONFIG_SYSCTL
901 static void unoptimize_all_kprobes(void)
902 {
903         struct hlist_head *head;
904         struct kprobe *p;
905         unsigned int i;
906
907         mutex_lock(&kprobe_mutex);
908         /* If optimization is already prohibited, just return */
909         if (!kprobes_allow_optimization) {
910                 mutex_unlock(&kprobe_mutex);
911                 return;
912         }
913
914         cpus_read_lock();
915         kprobes_allow_optimization = false;
916         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
917                 head = &kprobe_table[i];
918                 hlist_for_each_entry(p, head, hlist) {
919                         if (!kprobe_disabled(p))
920                                 unoptimize_kprobe(p, false);
921                 }
922         }
923         cpus_read_unlock();
924         mutex_unlock(&kprobe_mutex);
925
926         /* Wait for unoptimizing completion */
927         wait_for_kprobe_optimizer();
928         printk(KERN_INFO "Kprobes globally unoptimized\n");
929 }
930
931 static DEFINE_MUTEX(kprobe_sysctl_mutex);
932 int sysctl_kprobes_optimization;
933 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
934                                       void *buffer, size_t *length,
935                                       loff_t *ppos)
936 {
937         int ret;
938
939         mutex_lock(&kprobe_sysctl_mutex);
940         sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
941         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
942
943         if (sysctl_kprobes_optimization)
944                 optimize_all_kprobes();
945         else
946                 unoptimize_all_kprobes();
947         mutex_unlock(&kprobe_sysctl_mutex);
948
949         return ret;
950 }
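
/*
 * Usage sketch: this handler backs the 'debug.kprobes-optimization' sysctl
 * (see the kprobes documentation), so an administrator can toggle
 * optimization at runtime, e.g.:
 *
 *	sysctl -w debug.kprobes-optimization=0	<- unoptimize all kprobes
 *	sysctl -w debug.kprobes-optimization=1	<- allow optimization again
 */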
951 #endif /* CONFIG_SYSCTL */
952
953 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
954 static void __arm_kprobe(struct kprobe *p)
955 {
956         struct kprobe *_p;
957
958         /* Check collision with other optimized kprobes */
959         _p = get_optimized_kprobe((unsigned long)p->addr);
960         if (unlikely(_p))
961                 /* Fallback to unoptimized kprobe */
962                 unoptimize_kprobe(_p, true);
963
964         arch_arm_kprobe(p);
965         optimize_kprobe(p);     /* Try to optimize (add kprobe to a list) */
966 }
967
968 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
969 static void __disarm_kprobe(struct kprobe *p, bool reopt)
970 {
971         struct kprobe *_p;
972
973         /* Try to unoptimize */
974         unoptimize_kprobe(p, kprobes_all_disarmed);
975
976         if (!kprobe_queued(p)) {
977                 arch_disarm_kprobe(p);
978                 /* If another kprobe was blocked, optimize it. */
979                 _p = get_optimized_kprobe((unsigned long)p->addr);
980                 if (unlikely(_p) && reopt)
981                         optimize_kprobe(_p);
982         }
983         /* TODO: reoptimize others after unoptimizing this probe */
984 }
985
986 #else /* !CONFIG_OPTPROBES */
987
988 #define optimize_kprobe(p)                      do {} while (0)
989 #define unoptimize_kprobe(p, f)                 do {} while (0)
990 #define kill_optimized_kprobe(p)                do {} while (0)
991 #define prepare_optimized_kprobe(p)             do {} while (0)
992 #define try_to_optimize_kprobe(p)               do {} while (0)
993 #define __arm_kprobe(p)                         arch_arm_kprobe(p)
994 #define __disarm_kprobe(p, o)                   arch_disarm_kprobe(p)
995 #define kprobe_disarmed(p)                      kprobe_disabled(p)
996 #define wait_for_kprobe_optimizer()             do {} while (0)
997
998 static int reuse_unused_kprobe(struct kprobe *ap)
999 {
1000         /*
1001          * If optimized kprobes are NOT supported, the aggr kprobe is
1002          * released at the same time that the last aggregated kprobe is
1003          * unregistered.
1004          * Thus there should be no chance to reuse an unused kprobe.
1005          */
1006         printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
1007         return -EINVAL;
1008 }
1009
1010 static void free_aggr_kprobe(struct kprobe *p)
1011 {
1012         arch_remove_kprobe(p);
1013         kfree(p);
1014 }
1015
1016 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
1017 {
1018         return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
1019 }
1020 #endif /* CONFIG_OPTPROBES */
1021
1022 #ifdef CONFIG_KPROBES_ON_FTRACE
1023 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
1024         .func = kprobe_ftrace_handler,
1025         .flags = FTRACE_OPS_FL_SAVE_REGS,
1026 };
1027
1028 static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
1029         .func = kprobe_ftrace_handler,
1030         .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
1031 };
1032
1033 static int kprobe_ipmodify_enabled;
1034 static int kprobe_ftrace_enabled;
1035
1036 /* The caller must ensure 'p->addr' is really on an ftrace location */
1037 static int prepare_kprobe(struct kprobe *p)
1038 {
1039         if (!kprobe_ftrace(p))
1040                 return arch_prepare_kprobe(p);
1041
1042         return arch_prepare_kprobe_ftrace(p);
1043 }
1044
1045 /* Caller must lock kprobe_mutex */
1046 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1047                                int *cnt)
1048 {
1049         int ret = 0;
1050
1051         ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
1052         if (ret) {
1053                 pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
1054                          p->addr, ret);
1055                 return ret;
1056         }
1057
1058         if (*cnt == 0) {
1059                 ret = register_ftrace_function(ops);
1060                 if (ret) {
1061                         pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
1062                         goto err_ftrace;
1063                 }
1064         }
1065
1066         (*cnt)++;
1067         return ret;
1068
1069 err_ftrace:
1070         /*
1071          * At this point, since ops is not registered, we should be safe from
1072          * registering an empty filter.
1073          */
1074         ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1075         return ret;
1076 }
1077
1078 static int arm_kprobe_ftrace(struct kprobe *p)
1079 {
1080         bool ipmodify = (p->post_handler != NULL);
1081
1082         return __arm_kprobe_ftrace(p,
1083                 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1084                 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1085 }
1086
1087 /* Caller must lock kprobe_mutex */
1088 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1089                                   int *cnt)
1090 {
1091         int ret = 0;
1092
1093         if (*cnt == 1) {
1094                 ret = unregister_ftrace_function(ops);
1095                 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
1096                         return ret;
1097         }
1098
1099         (*cnt)--;
1100
1101         ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1102         WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
1103                   p->addr, ret);
1104         return ret;
1105 }
1106
1107 static int disarm_kprobe_ftrace(struct kprobe *p)
1108 {
1109         bool ipmodify = (p->post_handler != NULL);
1110
1111         return __disarm_kprobe_ftrace(p,
1112                 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1113                 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1114 }
1115 #else   /* !CONFIG_KPROBES_ON_FTRACE */
1116 static inline int prepare_kprobe(struct kprobe *p)
1117 {
1118         return arch_prepare_kprobe(p);
1119 }
1120
1121 static inline int arm_kprobe_ftrace(struct kprobe *p)
1122 {
1123         return -ENODEV;
1124 }
1125
1126 static inline int disarm_kprobe_ftrace(struct kprobe *p)
1127 {
1128         return -ENODEV;
1129 }
1130 #endif
1131
1132 /* Arm a kprobe with text_mutex */
1133 static int arm_kprobe(struct kprobe *kp)
1134 {
1135         if (unlikely(kprobe_ftrace(kp)))
1136                 return arm_kprobe_ftrace(kp);
1137
1138         cpus_read_lock();
1139         mutex_lock(&text_mutex);
1140         __arm_kprobe(kp);
1141         mutex_unlock(&text_mutex);
1142         cpus_read_unlock();
1143
1144         return 0;
1145 }
1146
1147 /* Disarm a kprobe with text_mutex */
1148 static int disarm_kprobe(struct kprobe *kp, bool reopt)
1149 {
1150         if (unlikely(kprobe_ftrace(kp)))
1151                 return disarm_kprobe_ftrace(kp);
1152
1153         cpus_read_lock();
1154         mutex_lock(&text_mutex);
1155         __disarm_kprobe(kp, reopt);
1156         mutex_unlock(&text_mutex);
1157         cpus_read_unlock();
1158
1159         return 0;
1160 }
1161
1162 /*
1163  * Aggregate handlers for multiple kprobes support - these handlers
1164  * take care of invoking the individual kprobe handlers on p->list
1165  */
1166 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1167 {
1168         struct kprobe *kp;
1169
1170         list_for_each_entry_rcu(kp, &p->list, list) {
1171                 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1172                         set_kprobe_instance(kp);
1173                         if (kp->pre_handler(kp, regs))
1174                                 return 1;
1175                 }
1176                 reset_kprobe_instance();
1177         }
1178         return 0;
1179 }
1180 NOKPROBE_SYMBOL(aggr_pre_handler);
1181
1182 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1183                               unsigned long flags)
1184 {
1185         struct kprobe *kp;
1186
1187         list_for_each_entry_rcu(kp, &p->list, list) {
1188                 if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1189                         set_kprobe_instance(kp);
1190                         kp->post_handler(kp, regs, flags);
1191                         reset_kprobe_instance();
1192                 }
1193         }
1194 }
1195 NOKPROBE_SYMBOL(aggr_post_handler);
1196
1197 /* Walk the list and increment the nmissed count for the multiprobe case */
1198 void kprobes_inc_nmissed_count(struct kprobe *p)
1199 {
1200         struct kprobe *kp;
1201         if (!kprobe_aggrprobe(p)) {
1202                 p->nmissed++;
1203         } else {
1204                 list_for_each_entry_rcu(kp, &p->list, list)
1205                         kp->nmissed++;
1206         }
1207         return;
1208 }
1209 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1210
1211 static void free_rp_inst_rcu(struct rcu_head *head)
1212 {
1213         struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
1214
1215         if (refcount_dec_and_test(&ri->rph->ref))
1216                 kfree(ri->rph);
1217         kfree(ri);
1218 }
1219 NOKPROBE_SYMBOL(free_rp_inst_rcu);
1220
1221 static void recycle_rp_inst(struct kretprobe_instance *ri)
1222 {
1223         struct kretprobe *rp = get_kretprobe(ri);
1224
1225         if (likely(rp)) {
1226                 freelist_add(&ri->freelist, &rp->freelist);
1227         } else
1228                 call_rcu(&ri->rcu, free_rp_inst_rcu);
1229 }
1230 NOKPROBE_SYMBOL(recycle_rp_inst);
1231
1232 static struct kprobe kprobe_busy = {
1233         .addr = (void *) get_kprobe,
1234 };
1235
1236 void kprobe_busy_begin(void)
1237 {
1238         struct kprobe_ctlblk *kcb;
1239
1240         preempt_disable();
1241         __this_cpu_write(current_kprobe, &kprobe_busy);
1242         kcb = get_kprobe_ctlblk();
1243         kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1244 }
1245
1246 void kprobe_busy_end(void)
1247 {
1248         __this_cpu_write(current_kprobe, NULL);
1249         preempt_enable();
1250 }
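
/*
 * Usage sketch: paths that touch kretprobe bookkeeping but are not real
 * probe hits bracket themselves with this pair, so that a nested breakpoint
 * is treated as a reentrant kprobe and skipped (kprobe_flush_task() below
 * does exactly this):
 *
 *	kprobe_busy_begin();
 *	... walk and recycle kretprobe instances ...
 *	kprobe_busy_end();
 */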
1251
1252 /*
1253  * This function is called from finish_task_switch() when task 'tk' becomes dead,
1254  * so that we can recycle any function-return probe instances associated
1255  * with this task. These left-over instances represent probed functions
1256  * that have been called but will never return.
1257  */
1258 void kprobe_flush_task(struct task_struct *tk)
1259 {
1260         struct kretprobe_instance *ri;
1261         struct llist_node *node;
1262
1263         /* Early boot, not yet initialized. */
1264         if (unlikely(!kprobes_initialized))
1265                 return;
1266
1267         kprobe_busy_begin();
1268
1269         node = __llist_del_all(&tk->kretprobe_instances);
1270         while (node) {
1271                 ri = container_of(node, struct kretprobe_instance, llist);
1272                 node = node->next;
1273
1274                 recycle_rp_inst(ri);
1275         }
1276
1277         kprobe_busy_end();
1278 }
1279 NOKPROBE_SYMBOL(kprobe_flush_task);
1280
1281 static inline void free_rp_inst(struct kretprobe *rp)
1282 {
1283         struct kretprobe_instance *ri;
1284         struct freelist_node *node;
1285         int count = 0;
1286
1287         node = rp->freelist.head;
1288         while (node) {
1289                 ri = container_of(node, struct kretprobe_instance, freelist);
1290                 node = node->next;
1291
1292                 kfree(ri);
1293                 count++;
1294         }
1295
1296         if (refcount_sub_and_test(count, &rp->rph->ref)) {
1297                 kfree(rp->rph);
1298                 rp->rph = NULL;
1299         }
1300 }
1301
1302 /* Add the new probe to ap->list */
1303 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1304 {
1305         if (p->post_handler)
1306                 unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */
1307
1308         list_add_rcu(&p->list, &ap->list);
1309         if (p->post_handler && !ap->post_handler)
1310                 ap->post_handler = aggr_post_handler;
1311
1312         return 0;
1313 }
1314
1315 /*
1316  * Fill in the required fields of the "manager kprobe". Replace the
1317  * earlier kprobe in the hlist with the manager kprobe
1318  */
1319 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1320 {
1321         /* Copy p's insn slot to ap */
1322         copy_kprobe(p, ap);
1323         flush_insn_slot(ap);
1324         ap->addr = p->addr;
1325         ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1326         ap->pre_handler = aggr_pre_handler;
1327         /* We don't care about a kprobe which has already gone. */
1328         if (p->post_handler && !kprobe_gone(p))
1329                 ap->post_handler = aggr_post_handler;
1330
1331         INIT_LIST_HEAD(&ap->list);
1332         INIT_HLIST_NODE(&ap->hlist);
1333
1334         list_add_rcu(&p->list, &ap->list);
1335         hlist_replace_rcu(&p->hlist, &ap->hlist);
1336 }
1337
1338 /*
1339  * This is the second or subsequent kprobe at the address - handle
1340  * the intricacies
1341  */
1342 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1343 {
1344         int ret = 0;
1345         struct kprobe *ap = orig_p;
1346
1347         cpus_read_lock();
1348
1349         /* For preparing optimization, jump_label_text_reserved() is called */
1350         jump_label_lock();
1351         mutex_lock(&text_mutex);
1352
1353         if (!kprobe_aggrprobe(orig_p)) {
1354                 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1355                 ap = alloc_aggr_kprobe(orig_p);
1356                 if (!ap) {
1357                         ret = -ENOMEM;
1358                         goto out;
1359                 }
1360                 init_aggr_kprobe(ap, orig_p);
1361         } else if (kprobe_unused(ap)) {
1362                 /* This probe is going to die. Rescue it */
1363                 ret = reuse_unused_kprobe(ap);
1364                 if (ret)
1365                         goto out;
1366         }
1367
1368         if (kprobe_gone(ap)) {
1369                 /*
1370                  * We are attempting to insert a new probe at the same location
1371                  * as a probe in a module vaddr area which has already been
1372                  * freed. So the instruction slot has already been
1373                  * released, and we need a new slot for the new probe.
1374                  */
1375                 ret = arch_prepare_kprobe(ap);
1376                 if (ret)
1377                         /*
1378                          * Even if we fail to allocate a new slot, we don't need
1379                          * to free the aggr_kprobe. It will be used next time, or
1380                          * freed by unregister_kprobe().
1381                          */
1382                         goto out;
1383
1384                 /* Prepare optimized instructions if possible. */
1385                 prepare_optimized_kprobe(ap);
1386
1387                 /*
1388                  * Clear the gone flag to prevent allocating a new slot again,
1389                  * and set the disabled flag because it is not armed yet.
1390                  */
1391                 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1392                             | KPROBE_FLAG_DISABLED;
1393         }
1394
1395         /* Copy ap's insn slot to p */
1396         copy_kprobe(ap, p);
1397         ret = add_new_kprobe(ap, p);
1398
1399 out:
1400         mutex_unlock(&text_mutex);
1401         jump_label_unlock();
1402         cpus_read_unlock();
1403
1404         if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1405                 ap->flags &= ~KPROBE_FLAG_DISABLED;
1406                 if (!kprobes_all_disarmed) {
1407                         /* Arm the breakpoint again. */
1408                         ret = arm_kprobe(ap);
1409                         if (ret) {
1410                                 ap->flags |= KPROBE_FLAG_DISABLED;
1411                                 list_del_rcu(&p->list);
1412                                 synchronize_rcu();
1413                         }
1414                 }
1415         }
1416         return ret;
1417 }
1418
1419 bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1420 {
1421         /* The __kprobes marked functions and entry code must not be probed */
1422         return addr >= (unsigned long)__kprobes_text_start &&
1423                addr < (unsigned long)__kprobes_text_end;
1424 }
1425
1426 static bool __within_kprobe_blacklist(unsigned long addr)
1427 {
1428         struct kprobe_blacklist_entry *ent;
1429
1430         if (arch_within_kprobe_blacklist(addr))
1431                 return true;
1432         /*
1433          * If a kprobe_blacklist exists, verify it and
1434          * fail any probe registration in a prohibited area.
1435          */
1436         list_for_each_entry(ent, &kprobe_blacklist, list) {
1437                 if (addr >= ent->start_addr && addr < ent->end_addr)
1438                         return true;
1439         }
1440         return false;
1441 }
1442
1443 bool within_kprobe_blacklist(unsigned long addr)
1444 {
1445         char symname[KSYM_NAME_LEN], *p;
1446
1447         if (__within_kprobe_blacklist(addr))
1448                 return true;
1449
1450         /* Check if the address is within a suffixed symbol */
1451         if (!lookup_symbol_name(addr, symname)) {
1452                 p = strchr(symname, '.');
1453                 if (!p)
1454                         return false;
1455                 *p = '\0';
1456                 addr = (unsigned long)kprobe_lookup_name(symname, 0);
1457                 if (addr)
1458                         return __within_kprobe_blacklist(addr);
1459         }
1460         return false;
1461 }
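
/*
 * Sketch of how entries get onto the blacklist (the function name below is
 * hypothetical): code outside the __kprobes text section opts out with
 * NOKPROBE_SYMBOL(), whose recorded range is what the checks above consult:
 *
 *	static int critical_exception_path(struct pt_regs *regs)
 *	{
 *		...
 *	}
 *	NOKPROBE_SYMBOL(critical_exception_path);
 */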
1462
1463 /*
1464  * If we have a symbol_name argument, look it up and add the offset field
1465  * to it. This way, we can specify an address relative to a symbol.
1466  * This returns encoded errors if it fails to look up the symbol or if an
1467  * invalid combination of parameters is given.
1468  */
1469 static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1470                         const char *symbol_name, unsigned int offset)
1471 {
1472         if ((symbol_name && addr) || (!symbol_name && !addr))
1473                 goto invalid;
1474
1475         if (symbol_name) {
1476                 addr = kprobe_lookup_name(symbol_name, offset);
1477                 if (!addr)
1478                         return ERR_PTR(-ENOENT);
1479         }
1480
1481         addr = (kprobe_opcode_t *)(((char *)addr) + offset);
1482         if (addr)
1483                 return addr;
1484
1485 invalid:
1486         return ERR_PTR(-EINVAL);
1487 }
1488
1489 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1490 {
1491         return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1492 }
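
/*
 * Example (the symbol and offset are illustrative): a kprobe defined as
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "vfs_read",
 *		.offset		= 0x10,
 *	};
 *
 * resolves here to the address of vfs_read plus 0x10 bytes, while setting
 * both '.addr' and '.symbol_name' (or neither) fails with -EINVAL.
 */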
1493
1494 /* Check that the passed kprobe is valid and return the kprobe from kprobe_table. */
1495 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1496 {
1497         struct kprobe *ap, *list_p;
1498
1499         lockdep_assert_held(&kprobe_mutex);
1500
1501         ap = get_kprobe(p->addr);
1502         if (unlikely(!ap))
1503                 return NULL;
1504
1505         if (p != ap) {
1506                 list_for_each_entry(list_p, &ap->list, list)
1507                         if (list_p == p)
1508                         /* kprobe p is a valid probe */
1509                                 goto valid;
1510                 return NULL;
1511         }
1512 valid:
1513         return ap;
1514 }
1515
1516 /*
1517  * Warn and return an error if the kprobe is being re-registered, since
1518  * this indicates a software bug.
1519  */
1520 static inline int warn_kprobe_rereg(struct kprobe *p)
1521 {
1522         int ret = 0;
1523
1524         mutex_lock(&kprobe_mutex);
1525         if (WARN_ON_ONCE(__get_valid_kprobe(p)))
1526                 ret = -EINVAL;
1527         mutex_unlock(&kprobe_mutex);
1528
1529         return ret;
1530 }
1531
1532 int __weak arch_check_ftrace_location(struct kprobe *p)
1533 {
1534         unsigned long ftrace_addr;
1535
1536         ftrace_addr = ftrace_location((unsigned long)p->addr);
1537         if (ftrace_addr) {
1538 #ifdef CONFIG_KPROBES_ON_FTRACE
1539                 /* The given address is not on an instruction boundary */
1540                 if ((unsigned long)p->addr != ftrace_addr)
1541                         return -EILSEQ;
1542                 p->flags |= KPROBE_FLAG_FTRACE;
1543 #else   /* !CONFIG_KPROBES_ON_FTRACE */
1544                 return -EINVAL;
1545 #endif
1546         }
1547         return 0;
1548 }
1549
1550 static int check_kprobe_address_safe(struct kprobe *p,
1551                                      struct module **probed_mod)
1552 {
1553         int ret;
1554
1555         ret = arch_check_ftrace_location(p);
1556         if (ret)
1557                 return ret;
1558         jump_label_lock();
1559         preempt_disable();
1560
1561         /* Ensure the address is neither in a reserved area nor outside of kernel text */
1562         if (!kernel_text_address((unsigned long) p->addr) ||
1563             within_kprobe_blacklist((unsigned long) p->addr) ||
1564             jump_label_text_reserved(p->addr, p->addr) ||
1565             static_call_text_reserved(p->addr, p->addr) ||
1566             find_bug((unsigned long)p->addr)) {
1567                 ret = -EINVAL;
1568                 goto out;
1569         }
1570
1571         /* Check if we are probing a module */
1572         *probed_mod = __module_text_address((unsigned long) p->addr);
1573         if (*probed_mod) {
1574                 /*
1575                  * We must hold a reference to the probed module while updating
1576                  * its code, to prevent it from being unloaded unexpectedly.
1577                  */
1578                 if (unlikely(!try_module_get(*probed_mod))) {
1579                         ret = -ENOENT;
1580                         goto out;
1581                 }
1582
1583                 /*
1584                  * If the module has freed its .init.text, we can't insert
1585                  * kprobes there.
1586                  */
1587                 if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1588                     (*probed_mod)->state != MODULE_STATE_COMING) {
1589                         module_put(*probed_mod);
1590                         *probed_mod = NULL;
1591                         ret = -ENOENT;
1592                 }
1593         }
1594 out:
1595         preempt_enable();
1596         jump_label_unlock();
1597
1598         return ret;
1599 }
1600
1601 int register_kprobe(struct kprobe *p)
1602 {
1603         int ret;
1604         struct kprobe *old_p;
1605         struct module *probed_mod;
1606         kprobe_opcode_t *addr;
1607
1608         /* Adjust probe address from symbol */
1609         addr = kprobe_addr(p);
1610         if (IS_ERR(addr))
1611                 return PTR_ERR(addr);
1612         p->addr = addr;
1613
1614         ret = warn_kprobe_rereg(p);
1615         if (ret)
1616                 return ret;
1617
1618         /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1619         p->flags &= KPROBE_FLAG_DISABLED;
1620         p->nmissed = 0;
1621         INIT_LIST_HEAD(&p->list);
1622
1623         ret = check_kprobe_address_safe(p, &probed_mod);
1624         if (ret)
1625                 return ret;
1626
1627         mutex_lock(&kprobe_mutex);
1628
1629         old_p = get_kprobe(p->addr);
1630         if (old_p) {
1631                 /* This may unoptimize old_p, which takes text_mutex internally. */
1632                 ret = register_aggr_kprobe(old_p, p);
1633                 goto out;
1634         }
1635
1636         cpus_read_lock();
1637         /* Prevent text modification */
1638         mutex_lock(&text_mutex);
1639         ret = prepare_kprobe(p);
1640         mutex_unlock(&text_mutex);
1641         cpus_read_unlock();
1642         if (ret)
1643                 goto out;
1644
1645         INIT_HLIST_NODE(&p->hlist);
1646         hlist_add_head_rcu(&p->hlist,
1647                        &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1648
1649         if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1650                 ret = arm_kprobe(p);
1651                 if (ret) {
1652                         hlist_del_rcu(&p->hlist);
1653                         synchronize_rcu();
1654                         goto out;
1655                 }
1656         }
1657
1658         /* Try to optimize kprobe */
1659         try_to_optimize_kprobe(p);
1660 out:
1661         mutex_unlock(&kprobe_mutex);
1662
1663         if (probed_mod)
1664                 module_put(probed_mod);
1665
1666         return ret;
1667 }
1668 EXPORT_SYMBOL_GPL(register_kprobe);
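
/*
 * Illustrative sketch (not part of this file): a minimal module-side use of
 * register_kprobe(). The names 'example_pre' and 'example_kp' and the probed
 * symbol are hypothetical.
 *
 *	static int example_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %pS\n", p->addr);
 *		return 0;	// 0 lets execution continue normally
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= example_pre,
 *	};
 *
 * The module's init function would call register_kprobe(&example_kp) and
 * check the return value; its exit function would call
 * unregister_kprobe(&example_kp).
 */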
1669
1670 /* Check if all probes on the aggrprobe are disabled */
1671 static int aggr_kprobe_disabled(struct kprobe *ap)
1672 {
1673         struct kprobe *kp;
1674
1675         lockdep_assert_held(&kprobe_mutex);
1676
1677         list_for_each_entry(kp, &ap->list, list)
1678                 if (!kprobe_disabled(kp))
1679                         /*
1680                          * There is an active probe on the list.
1681                          * We can't disable this ap.
1682                          */
1683                         return 0;
1684
1685         return 1;
1686 }
1687
1688 /* Disable one kprobe: must be called with kprobe_mutex held. */
1689 static struct kprobe *__disable_kprobe(struct kprobe *p)
1690 {
1691         struct kprobe *orig_p;
1692         int ret;
1693
1694         /* Get the original kprobe to return. */
1695         orig_p = __get_valid_kprobe(p);
1696         if (unlikely(orig_p == NULL))
1697                 return ERR_PTR(-EINVAL);
1698
1699         if (!kprobe_disabled(p)) {
1700                 /* Disable probe if it is a child probe */
1701                 if (p != orig_p)
1702                         p->flags |= KPROBE_FLAG_DISABLED;
1703
1704                 /* Try to disarm and disable this/parent probe */
1705                 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1706                         /*
1707                          * If kprobes_all_disarmed is set, orig_p
1708                          * should have already been disarmed, so
1709                          * skip the unneeded disarming step.
1710                          */
1711                         if (!kprobes_all_disarmed) {
1712                                 ret = disarm_kprobe(orig_p, true);
1713                                 if (ret) {
1714                                         p->flags &= ~KPROBE_FLAG_DISABLED;
1715                                         return ERR_PTR(ret);
1716                                 }
1717                         }
1718                         orig_p->flags |= KPROBE_FLAG_DISABLED;
1719                 }
1720         }
1721
1722         return orig_p;
1723 }
1724
1725 /*
1726  * Unregister a kprobe without scheduler synchronization.
1727  */
1728 static int __unregister_kprobe_top(struct kprobe *p)
1729 {
1730         struct kprobe *ap, *list_p;
1731
1732         /* Disable kprobe. This will disarm it if needed. */
1733         ap = __disable_kprobe(p);
1734         if (IS_ERR(ap))
1735                 return PTR_ERR(ap);
1736
1737         if (ap == p)
1738                 /*
1739                  * This probe is an independent (and non-optimized) kprobe
1740                  * (not an aggrprobe). Remove it from the hash list.
1741                  */
1742                 goto disarmed;
1743
1744         /* The following code expects this probe to be an aggrprobe. */
1745         WARN_ON(!kprobe_aggrprobe(ap));
1746
1747         if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1748                 /*
1749                  * !disarmed can happen if the probe is under delayed
1750                  * unoptimization.
1751                  */
1752                 goto disarmed;
1753         else {
1754                 /* If the probe being removed has a post_handler, update the aggrprobe. */
1755                 if (p->post_handler && !kprobe_gone(p)) {
1756                         list_for_each_entry(list_p, &ap->list, list) {
1757                                 if ((list_p != p) && (list_p->post_handler))
1758                                         goto noclean;
1759                         }
1760                         ap->post_handler = NULL;
1761                 }
1762 noclean:
1763                 /*
1764                  * Remove this probe from the aggrprobe's list; on this
1765                  * path __unregister_kprobe_bottom() will do nothing.
1766                  */
1767                 list_del_rcu(&p->list);
1768                 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1769                         /*
1770                          * Try to optimize this probe again, because the
1771                          * post handler may have been changed.
1772                          */
1773                         optimize_kprobe(ap);
1774         }
1775         return 0;
1776
1777 disarmed:
1778         hlist_del_rcu(&ap->hlist);
1779         return 0;
1780 }
1781
1782 static void __unregister_kprobe_bottom(struct kprobe *p)
1783 {
1784         struct kprobe *ap;
1785
1786         if (list_empty(&p->list))
1787                 /* This is an independent kprobe */
1788                 arch_remove_kprobe(p);
1789         else if (list_is_singular(&p->list)) {
1790                 /* This is the last child of an aggrprobe */
1791                 ap = list_entry(p->list.next, struct kprobe, list);
1792                 list_del(&p->list);
1793                 free_aggr_kprobe(ap);
1794         }
1795         /* Otherwise, do nothing. */
1796 }
1797
1798 int register_kprobes(struct kprobe **kps, int num)
1799 {
1800         int i, ret = 0;
1801
1802         if (num <= 0)
1803                 return -EINVAL;
1804         for (i = 0; i < num; i++) {
1805                 ret = register_kprobe(kps[i]);
1806                 if (ret < 0) {
1807                         if (i > 0)
1808                                 unregister_kprobes(kps, i);
1809                         break;
1810                 }
1811         }
1812         return ret;
1813 }
1814 EXPORT_SYMBOL_GPL(register_kprobes);
1815
1816 void unregister_kprobe(struct kprobe *p)
1817 {
1818         unregister_kprobes(&p, 1);
1819 }
1820 EXPORT_SYMBOL_GPL(unregister_kprobe);
1821
1822 void unregister_kprobes(struct kprobe **kps, int num)
1823 {
1824         int i;
1825
1826         if (num <= 0)
1827                 return;
1828         mutex_lock(&kprobe_mutex);
1829         for (i = 0; i < num; i++)
1830                 if (__unregister_kprobe_top(kps[i]) < 0)
1831                         kps[i]->addr = NULL;
1832         mutex_unlock(&kprobe_mutex);
1833
1834         synchronize_rcu();
1835         for (i = 0; i < num; i++)
1836                 if (kps[i]->addr)
1837                         __unregister_kprobe_bottom(kps[i]);
1838 }
1839 EXPORT_SYMBOL_GPL(unregister_kprobes);
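
/*
 * Illustrative sketch (not part of this file): batch registration with
 * register_kprobes()/unregister_kprobes(). On partial failure,
 * register_kprobes() has already unregistered whatever it managed to
 * register, so the caller only checks the return value. 'kp1' and 'kp2'
 * are hypothetical probes set up as in the earlier sketch.
 *
 *	static struct kprobe *example_probes[] = { &kp1, &kp2 };
 *
 *	ret = register_kprobes(example_probes, ARRAY_SIZE(example_probes));
 *	if (ret)
 *		return ret;	// nothing is left registered on failure
 *	...
 *	unregister_kprobes(example_probes, ARRAY_SIZE(example_probes));
 */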
1840
1841 int __weak kprobe_exceptions_notify(struct notifier_block *self,
1842                                         unsigned long val, void *data)
1843 {
1844         return NOTIFY_DONE;
1845 }
1846 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1847
1848 static struct notifier_block kprobe_exceptions_nb = {
1849         .notifier_call = kprobe_exceptions_notify,
1850         .priority = 0x7fffffff /* we need to be notified first */
1851 };
1852
1853 unsigned long __weak arch_deref_entry_point(void *entry)
1854 {
1855         return (unsigned long)entry;
1856 }
1857
1858 #ifdef CONFIG_KRETPROBES
1859
1860 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
1861                                              void *trampoline_address,
1862                                              void *frame_pointer)
1863 {
1864         kprobe_opcode_t *correct_ret_addr = NULL;
1865         struct kretprobe_instance *ri = NULL;
1866         struct llist_node *first, *node;
1867         struct kretprobe *rp;
1868
1869         /* Find all nodes for this frame. */
1870         first = node = current->kretprobe_instances.first;
1871         while (node) {
1872                 ri = container_of(node, struct kretprobe_instance, llist);
1873
1874                 BUG_ON(ri->fp != frame_pointer);
1875
1876                 if (ri->ret_addr != trampoline_address) {
1877                         correct_ret_addr = ri->ret_addr;
1878                         /*
1879                          * This is the real return address. Any other
1880                          * instances associated with this task are for
1881                          * other calls deeper on the call stack.
1882                          */
1883                         goto found;
1884                 }
1885
1886                 node = node->next;
1887         }
1888         pr_err("Oops! Kretprobe failed to find the correct return address.\n");
1889         BUG();
1890
1891 found:
1892         /* Unlink all nodes for this frame. */
1893         current->kretprobe_instances.first = node->next;
1894         node->next = NULL;
1895
1896         /* Run the handlers. */
1897         while (first) {
1898                 ri = container_of(first, struct kretprobe_instance, llist);
1899                 first = first->next;
1900
1901                 rp = get_kretprobe(ri);
1902                 if (rp && rp->handler) {
1903                         struct kprobe *prev = kprobe_running();
1904
1905                         __this_cpu_write(current_kprobe, &rp->kp);
1906                         ri->ret_addr = correct_ret_addr;
1907                         rp->handler(ri, regs);
1908                         __this_cpu_write(current_kprobe, prev);
1909                 }
1910
1911                 recycle_rp_inst(ri);
1912         }
1913
1914         return (unsigned long)correct_ret_addr;
1915 }
1916 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler);
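
/*
 * Illustrative sketch (not part of this file; the names and arch details
 * are assumptions): an architecture's kretprobe trampoline saves the
 * registers and then calls this handler to recover the real return
 * address, roughly:
 *
 *	// in arch trampoline code:
 *	regs->ip = __kretprobe_trampoline_handler(regs, trampoline_addr,
 *						  (void *)regs->sp);
 *
 * The trampoline address and frame pointer passed here must match what
 * arch_prepare_kretprobe() recorded in ri->ret_addr and ri->fp, since
 * those are what the lookup and the BUG_ON() above check against.
 */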
1917
1918 /*
1919  * This kprobe pre_handler is registered with every kretprobe. When the
1920  * probe hits, it sets up the return probe.
1921  */
1922 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1923 {
1924         struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1925         struct kretprobe_instance *ri;
1926         struct freelist_node *fn;
1927
1928         fn = freelist_try_get(&rp->freelist);
1929         if (!fn) {
1930                 rp->nmissed++;
1931                 return 0;
1932         }
1933
1934         ri = container_of(fn, struct kretprobe_instance, freelist);
1935
1936         if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1937                 freelist_add(&ri->freelist, &rp->freelist);
1938                 return 0;
1939         }
1940
1941         arch_prepare_kretprobe(ri, regs);
1942
1943         __llist_add(&ri->llist, &current->kretprobe_instances);
1944
1945         return 0;
1946 }
1947 NOKPROBE_SYMBOL(pre_handler_kretprobe);
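
/*
 * Illustrative sketch (not part of this file): how a user-supplied
 * entry_handler interacts with the logic above. Returning non-zero makes
 * pre_handler_kretprobe() recycle the instance, so no return handler fires
 * for that call. The names are hypothetical; storing into ri->data assumes
 * the kretprobe was registered with data_size >= sizeof(unsigned long).
 *
 *	static int example_entry(struct kretprobe_instance *ri,
 *				 struct pt_regs *regs)
 *	{
 *		if (!current->mm)
 *			return 1;	// skip kernel threads
 *		*(unsigned long *)ri->data = jiffies;	// per-instance scratch
 *		return 0;
 *	}
 */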
1948
1949 bool __weak arch_kprobe_on_func_entry(unsigned long offset)
1950 {
1951         return !offset;
1952 }
1953
1954 /**
1955  * kprobe_on_func_entry() - check whether the given address is a function entry
1956  * @addr: Target address
1957  * @sym:  Target symbol name
1958  * @offset: The offset from the symbol or the address
1959  *
1960  * This checks whether the given @addr+@offset or @sym+@offset is at the
1961  * function entry address.
1962  * It returns 0 if it is the function entry, or -EINVAL if it is not.
1963  * It returns -ENOENT if the symbol or address lookup fails.
1964  * The caller must pass either @addr or @sym (the other must be NULL);
1965  * otherwise this returns -EINVAL.
1966  */
1967 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1968 {
1969         kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1970
1971         if (IS_ERR(kp_addr))
1972                 return PTR_ERR(kp_addr);
1973
1974         if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
1975                 return -ENOENT;
1976
1977         if (!arch_kprobe_on_func_entry(offset))
1978                 return -EINVAL;
1979
1980         return 0;
1981 }
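
/*
 * Illustrative sketch (not part of this file): the two lookup styles this
 * helper accepts; the symbol name is hypothetical.
 *
 *	// By symbol: is "vfs_read"+0 the function entry?
 *	ret = kprobe_on_func_entry(NULL, "vfs_read", 0);
 *
 *	// By address: is addr+offset still the function entry?
 *	ret = kprobe_on_func_entry(addr, NULL, offset);
 *
 * A non-zero resulting offset passes only if arch_kprobe_on_func_entry()
 * accepts it; the default implementation above accepts only offset 0.
 */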
1982
1983 int register_kretprobe(struct kretprobe *rp)
1984 {
1985         int ret;
1986         struct kretprobe_instance *inst;
1987         int i;
1988         void *addr;
1989
1990         ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
1991         if (ret)
1992                 return ret;
1993
1994         /* If only rp->kp.addr is specified, check for re-registration. */
1995         if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
1996                 return -EINVAL;
1997
1998         if (kretprobe_blacklist_size) {
1999                 addr = kprobe_addr(&rp->kp);
2000                 if (IS_ERR(addr))
2001                         return PTR_ERR(addr);
2002
2003                 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2004                         if (kretprobe_blacklist[i].addr == addr)
2005                                 return -EINVAL;
2006                 }
2007         }
2008
2009         rp->kp.pre_handler = pre_handler_kretprobe;
2010         rp->kp.post_handler = NULL;
2011
2012         /* Pre-allocate memory for the maximum number of kretprobe instances. */
2013         if (rp->maxactive <= 0) {
2014 #ifdef CONFIG_PREEMPTION
2015                 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
2016 #else
2017                 rp->maxactive = num_possible_cpus();
2018 #endif
2019         }
2020         rp->freelist.head = NULL;
2021         rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
2022         if (!rp->rph)
2023                 return -ENOMEM;
2024
2025         rp->rph->rp = rp;
2026         for (i = 0; i < rp->maxactive; i++) {
2027                 inst = kzalloc(sizeof(struct kretprobe_instance) +
2028                                rp->data_size, GFP_KERNEL);
2029                 if (inst == NULL) {
2030                         refcount_set(&rp->rph->ref, i);
2031                         free_rp_inst(rp);
2032                         return -ENOMEM;
2033                 }
2034                 inst->rph = rp->rph;
2035                 freelist_add(&inst->freelist, &rp->freelist);
2036         }
2037         refcount_set(&rp->rph->ref, i);
2038
2039         rp->nmissed = 0;
2040         /* Establish function entry probe point */
2041         ret = register_kprobe(&rp->kp);
2042         if (ret != 0)
2043                 free_rp_inst(rp);
2044         return ret;
2045 }
2046 EXPORT_SYMBOL_GPL(register_kretprobe);
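
/*
 * Illustrative sketch (not part of this file): a minimal kretprobe; the
 * names and the probed symbol are hypothetical. Setting data_size reserves
 * per-instance scratch space (see the entry_handler sketch above).
 *
 *	static int example_ret(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("returned %lu\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe example_rp = {
 *		.kp.symbol_name	= "vfs_read",
 *		.handler	= example_ret,
 *		.maxactive	= 20,	// <= 0 picks a CPU-count based default
 *	};
 *
 * register_kretprobe(&example_rp) arms it; unregister_kretprobe() tears it
 * down, including the pre-allocated instances.
 */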
2047
2048 int register_kretprobes(struct kretprobe **rps, int num)
2049 {
2050         int ret = 0, i;
2051
2052         if (num <= 0)
2053                 return -EINVAL;
2054         for (i = 0; i < num; i++) {
2055                 ret = register_kretprobe(rps[i]);
2056                 if (ret < 0) {
2057                         if (i > 0)
2058                                 unregister_kretprobes(rps, i);
2059                         break;
2060                 }
2061         }
2062         return ret;
2063 }
2064 EXPORT_SYMBOL_GPL(register_kretprobes);
2065
2066 void unregister_kretprobe(struct kretprobe *rp)
2067 {
2068         unregister_kretprobes(&rp, 1);
2069 }
2070 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2071
2072 void unregister_kretprobes(struct kretprobe **rps, int num)
2073 {
2074         int i;
2075
2076         if (num <= 0)
2077                 return;
2078         mutex_lock(&kprobe_mutex);
2079         for (i = 0; i < num; i++) {
2080                 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2081                         rps[i]->kp.addr = NULL;
2082                 rps[i]->rph->rp = NULL;
2083         }
2084         mutex_unlock(&kprobe_mutex);
2085
2086         synchronize_rcu();
2087         for (i = 0; i < num; i++) {
2088                 if (rps[i]->kp.addr) {
2089                         __unregister_kprobe_bottom(&rps[i]->kp);
2090                         free_rp_inst(rps[i]);
2091                 }
2092         }
2093 }
2094 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2095
2096 #else /* CONFIG_KRETPROBES */
2097 int register_kretprobe(struct kretprobe *rp)
2098 {
2099         return -ENOSYS;
2100 }
2101 EXPORT_SYMBOL_GPL(register_kretprobe);
2102
2103 int register_kretprobes(struct kretprobe **rps, int num)
2104 {
2105         return -ENOSYS;
2106 }
2107 EXPORT_SYMBOL_GPL(register_kretprobes);
2108
2109 void unregister_kretprobe(struct kretprobe *rp)
2110 {
2111 }
2112 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2113
2114 void unregister_kretprobes(struct kretprobe **rps, int num)
2115 {
2116 }
2117 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2118
2119 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2120 {
2121         return 0;
2122 }
2123 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2124
2125 #endif /* CONFIG_KRETPROBES */
2126
2127 /* Mark the kprobe as gone and remove its instruction buffer. */
2128 static void kill_kprobe(struct kprobe *p)
2129 {
2130         struct kprobe *kp;
2131
2132         lockdep_assert_held(&kprobe_mutex);
2133
2134         p->flags |= KPROBE_FLAG_GONE;
2135         if (kprobe_aggrprobe(p)) {
2136                 /*
2137                  * If this is an aggr_kprobe, we have to walk all the
2138                  * chained probes and mark them GONE.
2139                  */
2140                 list_for_each_entry(kp, &p->list, list)
2141                         kp->flags |= KPROBE_FLAG_GONE;
2142                 p->post_handler = NULL;
2143                 kill_optimized_kprobe(p);
2144         }
2145         /*
2146          * Here, we can remove insn_slot safely, because no thread calls
2147          * the original probed function (which will be freed soon) anymore.
2148          */
2149         arch_remove_kprobe(p);
2150
2151         /*
2152          * The module is going away. We must disarm any kprobe that uses
2153          * ftrace, because the ftrace framework is still available at the
2154          * MODULE_STATE_GOING notification.
2155          */
2156         if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2157                 disarm_kprobe_ftrace(p);
2158 }
2159
2160 /* Disable one kprobe */
2161 int disable_kprobe(struct kprobe *kp)
2162 {
2163         int ret = 0;
2164         struct kprobe *p;
2165
2166         mutex_lock(&kprobe_mutex);
2167
2168         /* Disable this kprobe */
2169         p = __disable_kprobe(kp);
2170         if (IS_ERR(p))
2171                 ret = PTR_ERR(p);
2172
2173         mutex_unlock(&kprobe_mutex);
2174         return ret;
2175 }
2176 EXPORT_SYMBOL_GPL(disable_kprobe);
2177
2178 /* Enable one kprobe */
2179 int enable_kprobe(struct kprobe *kp)
2180 {
2181         int ret = 0;
2182         struct kprobe *p;
2183
2184         mutex_lock(&kprobe_mutex);
2185
2186         /* Check whether the specified probe is valid. */
2187         p = __get_valid_kprobe(kp);
2188         if (unlikely(p == NULL)) {
2189                 ret = -EINVAL;
2190                 goto out;
2191         }
2192
2193         if (kprobe_gone(kp)) {
2194                 /* This kprobe is gone; we cannot enable it. */
2195                 ret = -EINVAL;
2196                 goto out;
2197         }
2198
2199         if (p != kp)
2200                 kp->flags &= ~KPROBE_FLAG_DISABLED;
2201
2202         if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2203                 p->flags &= ~KPROBE_FLAG_DISABLED;
2204                 ret = arm_kprobe(p);
2205                 if (ret)
2206                         p->flags |= KPROBE_FLAG_DISABLED;
2207         }
2208 out:
2209         mutex_unlock(&kprobe_mutex);
2210         return ret;
2211 }
2212 EXPORT_SYMBOL_GPL(enable_kprobe);
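
/*
 * Illustrative sketch (not part of this file): a registered probe can be
 * toggled without unregistering it; 'example_kp' is the hypothetical probe
 * from the register_kprobe() sketch above.
 *
 *	disable_kprobe(&example_kp);	// disarmed, but still registered
 *	...
 *	ret = enable_kprobe(&example_kp);	// re-arm; -EINVAL if it is gone
 */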
2213
2214 /* Callers must NOT use this on the usual path; it is only for critical cases. */
2215 void dump_kprobe(struct kprobe *kp)
2216 {
2217         pr_err("Dumping kprobe:\n");
2218         pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
2219                kp->symbol_name, kp->offset, kp->addr);
2220 }
2221 NOKPROBE_SYMBOL(dump_kprobe);
2222
2223 int kprobe_add_ksym_blacklist(unsigned long entry)
2224 {
2225         struct kprobe_blacklist_entry *ent;
2226         unsigned long offset = 0, size = 0;
2227
2228         if (!kernel_text_address(entry) ||
2229             !kallsyms_lookup_size_offset(entry, &size, &offset))
2230                 return -EINVAL;
2231
2232         ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2233         if (!ent)
2234                 return -ENOMEM;
2235         ent->start_addr = entry;
2236         ent->end_addr = entry + size;
2237         INIT_LIST_HEAD(&ent->list);
2238         list_add_tail(&ent->list, &kprobe_blacklist);
2239
2240         return (int)size;
2241 }
2242
2243 /* Add all symbols in the given area to the kprobe blacklist. */
2244 int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2245 {
2246         unsigned long entry;
2247         int ret = 0;
2248
2249         for (entry = start; entry < end; entry += ret) {
2250                 ret = kprobe_add_ksym_blacklist(entry);
2251                 if (ret < 0)
2252                         return ret;
2253                 if (ret == 0)   /* In case of alias symbol */
2254                         ret = 1;
2255         }
2256         return 0;
2257 }
2258
2259 /* Remove all symbols in the given area from the kprobe blacklist. */
2260 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2261 {
2262         struct kprobe_blacklist_entry *ent, *n;
2263
2264         list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2265                 if (ent->start_addr < start || ent->start_addr >= end)
2266                         continue;
2267                 list_del(&ent->list);
2268                 kfree(ent);
2269         }
2270 }
2271
2272 static void kprobe_remove_ksym_blacklist(unsigned long entry)
2273 {
2274         kprobe_remove_area_blacklist(entry, entry + 1);
2275 }
2276
2277 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2278                                    char *type, char *sym)
2279 {
2280         return -ERANGE;
2281 }
2282
2283 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2284                        char *sym)
2285 {
2286 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2287         if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2288                 return 0;
2289 #ifdef CONFIG_OPTPROBES
2290         if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2291                 return 0;
2292 #endif
2293 #endif
2294         if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2295                 return 0;
2296         return -ERANGE;
2297 }
2298
2299 int __init __weak arch_populate_kprobe_blacklist(void)
2300 {
2301         return 0;
2302 }
2303
2304 /*
2305  * Look up and populate the kprobe_blacklist.
2306  *
2307  * Unlike the kretprobe blacklist, we need to determine
2308  * the range of addresses that belong to these functions,
2309  * since a kprobe need not necessarily be at the beginning
2310  * of a function.
2311  */
2312 static int __init populate_kprobe_blacklist(unsigned long *start,
2313                                              unsigned long *end)
2314 {
2315         unsigned long entry;
2316         unsigned long *iter;
2317         int ret;
2318
2319         for (iter = start; iter < end; iter++) {
2320                 entry = arch_deref_entry_point((void *)*iter);
2321                 ret = kprobe_add_ksym_blacklist(entry);
2322                 if (ret == -EINVAL)
2323                         continue;
2324                 if (ret < 0)
2325                         return ret;
2326         }
2327
2328         /* Symbols in __kprobes_text are blacklisted */
2329         ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2330                                         (unsigned long)__kprobes_text_end);
2331         if (ret)
2332                 return ret;
2333
2334         /* Symbols in noinstr section are blacklisted */
2335         ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2336                                         (unsigned long)__noinstr_text_end);
2337
2338         return ret ? : arch_populate_kprobe_blacklist();
2339 }
2340
2341 static void add_module_kprobe_blacklist(struct module *mod)
2342 {
2343         unsigned long start, end;
2344         int i;
2345
2346         if (mod->kprobe_blacklist) {
2347                 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2348                         kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2349         }
2350
2351         start = (unsigned long)mod->kprobes_text_start;
2352         if (start) {
2353                 end = start + mod->kprobes_text_size;
2354                 kprobe_add_area_blacklist(start, end);
2355         }
2356
2357         start = (unsigned long)mod->noinstr_text_start;
2358         if (start) {
2359                 end = start + mod->noinstr_text_size;
2360                 kprobe_add_area_blacklist(start, end);
2361         }
2362 }
2363
2364 static void remove_module_kprobe_blacklist(struct module *mod)
2365 {
2366         unsigned long start, end;
2367         int i;
2368
2369         if (mod->kprobe_blacklist) {
2370                 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2371                         kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2372         }
2373
2374         start = (unsigned long)mod->kprobes_text_start;
2375         if (start) {
2376                 end = start + mod->kprobes_text_size;
2377                 kprobe_remove_area_blacklist(start, end);
2378         }
2379
2380         start = (unsigned long)mod->noinstr_text_start;
2381         if (start) {
2382                 end = start + mod->noinstr_text_size;
2383                 kprobe_remove_area_blacklist(start, end);
2384         }
2385 }
2386
2387 /* Module notifier callback: check kprobes on the module. */
2388 static int kprobes_module_callback(struct notifier_block *nb,
2389                                    unsigned long val, void *data)
2390 {
2391         struct module *mod = data;
2392         struct hlist_head *head;
2393         struct kprobe *p;
2394         unsigned int i;
2395         int checkcore = (val == MODULE_STATE_GOING);
2396
2397         if (val == MODULE_STATE_COMING) {
2398                 mutex_lock(&kprobe_mutex);
2399                 add_module_kprobe_blacklist(mod);
2400                 mutex_unlock(&kprobe_mutex);
2401         }
2402         if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2403                 return NOTIFY_DONE;
2404
2405         /*
2406          * When MODULE_STATE_GOING is notified, both the module's .text and
2407          * .init.text sections will be freed. When MODULE_STATE_LIVE is
2408          * notified, only the .init.text section will be freed. We need to
2409          * kill the kprobes that have been inserted in those sections.
2410          */
2411         mutex_lock(&kprobe_mutex);
2412         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2413                 head = &kprobe_table[i];
2414                 hlist_for_each_entry(p, head, hlist)
2415                         if (within_module_init((unsigned long)p->addr, mod) ||
2416                             (checkcore &&
2417                              within_module_core((unsigned long)p->addr, mod))) {
2418                                 /*
2419                                  * The vaddr at which this probe is installed
2420                                  * will soon be vfreed but not synced to disk.
2421                                  * Hence, disarming the breakpoint isn't needed.
2422                                  *
2423                                  * Note, this will also move any optimized probes
2424                                  * that are pending to be removed from their
2425                                  * corresponding lists to the freeing_list and
2426                                  * will not be touched by the delayed
2427                                  * kprobe_optimizer work handler.
2428                                  */
2429                                 kill_kprobe(p);
2430                         }
2431         }
2432         if (val == MODULE_STATE_GOING)
2433                 remove_module_kprobe_blacklist(mod);
2434         mutex_unlock(&kprobe_mutex);
2435         return NOTIFY_DONE;
2436 }
2437
2438 static struct notifier_block kprobe_module_nb = {
2439         .notifier_call = kprobes_module_callback,
2440         .priority = 0
2441 };
2442
2443 /* Markers of _kprobe_blacklist section */
2444 extern unsigned long __start_kprobe_blacklist[];
2445 extern unsigned long __stop_kprobe_blacklist[];
2446
2447 void kprobe_free_init_mem(void)
2448 {
2449         void *start = (void *)(&__init_begin);
2450         void *end = (void *)(&__init_end);
2451         struct hlist_head *head;
2452         struct kprobe *p;
2453         int i;
2454
2455         mutex_lock(&kprobe_mutex);
2456
2457         /* Kill all kprobes on initmem */
2458         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2459                 head = &kprobe_table[i];
2460                 hlist_for_each_entry(p, head, hlist) {
2461                         if (start <= (void *)p->addr && (void *)p->addr < end)
2462                                 kill_kprobe(p);
2463                 }
2464         }
2465
2466         mutex_unlock(&kprobe_mutex);
2467 }
2468
2469 static int __init init_kprobes(void)
2470 {
2471         int i, err = 0;
2472
2473         /* FIXME allocate the probe table, currently defined statically */
2474         /* initialize all list heads */
2475         for (i = 0; i < KPROBE_TABLE_SIZE; i++)
2476                 INIT_HLIST_HEAD(&kprobe_table[i]);
2477
2478         err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2479                                         __stop_kprobe_blacklist);
2480         if (err) {
2481                 pr_err("kprobes: failed to populate blacklist: %d\n", err);
2482                 pr_err("Please use kprobes carefully.\n");
2483         }
2484
2485         if (kretprobe_blacklist_size) {
2486                 /* Look up each function address from its name. */
2487                 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2488                         kretprobe_blacklist[i].addr =
2489                                 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2490                         if (!kretprobe_blacklist[i].addr)
2491                                 pr_err("kretprobe: lookup failed: %s\n",
2492                                        kretprobe_blacklist[i].name);
2493                 }
2494         }
2495
2496         /* By default, kprobes are armed */
2497         kprobes_all_disarmed = false;
2498
2499 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2500         /* Init kprobe_optinsn_slots for allocation */
2501         kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2502 #endif
2503
2504         err = arch_init_kprobes();
2505         if (!err)
2506                 err = register_die_notifier(&kprobe_exceptions_nb);
2507         if (!err)
2508                 err = register_module_notifier(&kprobe_module_nb);
2509
2510         kprobes_initialized = (err == 0);
2511
2512         if (!err)
2513                 init_test_probes();
2514         return err;
2515 }
2516 early_initcall(init_kprobes);
2517
2518 #if defined(CONFIG_OPTPROBES)
2519 static int __init init_optprobes(void)
2520 {
2521         /*
2522          * Enable kprobe optimization: this kicks the optimizer, which
2523          * depends on synchronize_rcu_tasks() and ksoftirqd; those are not
2524          * available at early-initcall time, so delay the optimization.
2525          */
2526         optimize_all_kprobes();
2527
2528         return 0;
2529 }
2530 subsys_initcall(init_optprobes);
2531 #endif
2532
2533 #ifdef CONFIG_DEBUG_FS
2534 static void report_probe(struct seq_file *pi, struct kprobe *p,
2535                 const char *sym, int offset, char *modname, struct kprobe *pp)
2536 {
2537         char *kprobe_type;
2538         void *addr = p->addr;
2539
2540         if (p->pre_handler == pre_handler_kretprobe)
2541                 kprobe_type = "r";
2542         else
2543                 kprobe_type = "k";
2544
2545         if (!kallsyms_show_value(pi->file->f_cred))
2546                 addr = NULL;
2547
2548         if (sym)
2549                 seq_printf(pi, "%px  %s  %s+0x%x  %s ",
2550                         addr, kprobe_type, sym, offset,
2551                         (modname ? modname : " "));
2552         else    /* try to use %pS */
2553                 seq_printf(pi, "%px  %s  %pS ",
2554                         addr, kprobe_type, p->addr);
2555
2556         if (!pp)
2557                 pp = p;
2558         seq_printf(pi, "%s%s%s%s\n",
2559                 (kprobe_gone(p) ? "[GONE]" : ""),
2560                 ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2561                 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2562                 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2563 }
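
/*
 * For reference, lines produced by report_probe() look like the following
 * (addresses and symbols are made-up examples; the address column reads
 * 0000000000000000 when kallsyms_show_value() denies access):
 *
 *	ffffffff81234560  k  vfs_read+0x0  [FTRACE]
 *	ffffffff81234780  r  kernel_clone+0x0  [DISABLED]
 */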
2564
2565 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2566 {
2567         return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2568 }
2569
2570 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2571 {
2572         (*pos)++;
2573         if (*pos >= KPROBE_TABLE_SIZE)
2574                 return NULL;
2575         return pos;
2576 }
2577
2578 static void kprobe_seq_stop(struct seq_file *f, void *v)
2579 {
2580         /* Nothing to do */
2581 }
2582
2583 static int show_kprobe_addr(struct seq_file *pi, void *v)
2584 {
2585         struct hlist_head *head;
2586         struct kprobe *p, *kp;
2587         const char *sym = NULL;
2588         unsigned int i = *(loff_t *) v;
2589         unsigned long offset = 0;
2590         char *modname, namebuf[KSYM_NAME_LEN];
2591
2592         head = &kprobe_table[i];
2593         preempt_disable();
2594         hlist_for_each_entry_rcu(p, head, hlist) {
2595                 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2596                                         &offset, &modname, namebuf);
2597                 if (kprobe_aggrprobe(p)) {
2598                         list_for_each_entry_rcu(kp, &p->list, list)
2599                                 report_probe(pi, kp, sym, offset, modname, p);
2600                 } else
2601                         report_probe(pi, p, sym, offset, modname, NULL);
2602         }
2603         preempt_enable();
2604         return 0;
2605 }
2606
2607 static const struct seq_operations kprobes_sops = {
2608         .start = kprobe_seq_start,
2609         .next  = kprobe_seq_next,
2610         .stop  = kprobe_seq_stop,
2611         .show  = show_kprobe_addr
2612 };
2613
2614 DEFINE_SEQ_ATTRIBUTE(kprobes);
2615
2616 /* kprobes/blacklist -- shows which functions cannot be probed */
2617 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2618 {
2619         mutex_lock(&kprobe_mutex);
2620         return seq_list_start(&kprobe_blacklist, *pos);
2621 }
2622
2623 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2624 {
2625         return seq_list_next(v, &kprobe_blacklist, pos);
2626 }
2627
2628 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2629 {
2630         struct kprobe_blacklist_entry *ent =
2631                 list_entry(v, struct kprobe_blacklist_entry, list);
2632
2633         /*
2634          * If /proc/kallsyms is not showing kernel addresses, we won't
2635          * show them here either.
2636          */
2637         if (!kallsyms_show_value(m->file->f_cred))
2638                 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2639                            (void *)ent->start_addr);
2640         else
2641                 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2642                            (void *)ent->end_addr, (void *)ent->start_addr);
2643         return 0;
2644 }
2645
2646 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2647 {
2648         mutex_unlock(&kprobe_mutex);
2649 }
2650
2651 static const struct seq_operations kprobe_blacklist_sops = {
2652         .start = kprobe_blacklist_seq_start,
2653         .next  = kprobe_blacklist_seq_next,
2654         .stop  = kprobe_blacklist_seq_stop,
2655         .show  = kprobe_blacklist_seq_show,
2656 };
2657 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
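
/*
 * For reference, a 'kprobes/blacklist' line looks like the following (the
 * range is a made-up example; both addresses read 0x0000000000000000 when
 * kallsyms_show_value() denies access):
 *
 *	0xffffffff81234560-0xffffffff8123459c	get_kprobe
 */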
2658
2659 static int arm_all_kprobes(void)
2660 {
2661         struct hlist_head *head;
2662         struct kprobe *p;
2663         unsigned int i, total = 0, errors = 0;
2664         int err, ret = 0;
2665
2666         mutex_lock(&kprobe_mutex);
2667
2668         /* If kprobes are armed, just return */
2669         if (!kprobes_all_disarmed)
2670                 goto already_enabled;
2671
2672         /*
2673          * optimize_kprobe() called by arm_kprobe() checks
2674          * kprobes_all_disarmed, so set kprobes_all_disarmed before
2675          * calling arm_kprobe().
2676          */
2677         kprobes_all_disarmed = false;
2678         /* Arming a kprobe does not, by itself, optimize it. */
2679         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2680                 head = &kprobe_table[i];
2681                 /* Arm all kprobes on a best-effort basis */
2682                 hlist_for_each_entry(p, head, hlist) {
2683                         if (!kprobe_disabled(p)) {
2684                                 err = arm_kprobe(p);
2685                                 if (err)  {
2686                                         errors++;
2687                                         ret = err;
2688                                 }
2689                                 total++;
2690                         }
2691                 }
2692         }
2693
2694         if (errors)
2695                 pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
2696                         errors, total);
2697         else
2698                 pr_info("Kprobes globally enabled\n");
2699
2700 already_enabled:
2701         mutex_unlock(&kprobe_mutex);
2702         return ret;
2703 }
2704
2705 static int disarm_all_kprobes(void)
2706 {
2707         struct hlist_head *head;
2708         struct kprobe *p;
2709         unsigned int i, total = 0, errors = 0;
2710         int err, ret = 0;
2711
2712         mutex_lock(&kprobe_mutex);
2713
2714         /* If kprobes are already disarmed, just return */
2715         if (kprobes_all_disarmed) {
2716                 mutex_unlock(&kprobe_mutex);
2717                 return 0;
2718         }
2719
2720         kprobes_all_disarmed = true;
2721
2722         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2723                 head = &kprobe_table[i];
2724                 /* Disarm all kprobes on a best-effort basis */
2725                 hlist_for_each_entry(p, head, hlist) {
2726                         if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2727                                 err = disarm_kprobe(p, false);
2728                                 if (err) {
2729                                         errors++;
2730                                         ret = err;
2731                                 }
2732                                 total++;
2733                         }
2734                 }
2735         }
2736
2737         if (errors)
2738                 pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
2739                         errors, total);
2740         else
2741                 pr_info("Kprobes globally disabled\n");
2742
2743         mutex_unlock(&kprobe_mutex);
2744
2745         /* Wait for the optimizer to finish disarming all kprobes. */
2746         wait_for_kprobe_optimizer();
2747
2748         return ret;
2749 }
2750
2751 /*
2752  * XXX: The debugfs bool file interface doesn't allow for callbacks
2753  * when the bool state is switched. We can reuse that facility when
2754  * available.
2755  */
2756 static ssize_t read_enabled_file_bool(struct file *file,
2757                char __user *user_buf, size_t count, loff_t *ppos)
2758 {
2759         char buf[3];
2760
2761         if (!kprobes_all_disarmed)
2762                 buf[0] = '1';
2763         else
2764                 buf[0] = '0';
2765         buf[1] = '\n';
2766         buf[2] = 0x00;
2767         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2768 }
2769
2770 static ssize_t write_enabled_file_bool(struct file *file,
2771                const char __user *user_buf, size_t count, loff_t *ppos)
2772 {
2773         char buf[32];
2774         size_t buf_size;
2775         int ret = 0;
2776
2777         buf_size = min(count, (sizeof(buf)-1));
2778         if (copy_from_user(buf, user_buf, buf_size))
2779                 return -EFAULT;
2780
2781         buf[buf_size] = '\0';
2782         switch (buf[0]) {
2783         case 'y':
2784         case 'Y':
2785         case '1':
2786                 ret = arm_all_kprobes();
2787                 break;
2788         case 'n':
2789         case 'N':
2790         case '0':
2791                 ret = disarm_all_kprobes();
2792                 break;
2793         default:
2794                 return -EINVAL;
2795         }
2796
2797         if (ret)
2798                 return ret;
2799
2800         return count;
2801 }
2802
2803 static const struct file_operations fops_kp = {
2804         .read =         read_enabled_file_bool,
2805         .write =        write_enabled_file_bool,
2806         .llseek =       default_llseek,
2807 };
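
/*
 * Illustrative usage from user space (assuming debugfs is mounted at the
 * conventional /sys/kernel/debug):
 *
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 *
 * Reading the file yields '0' or '1' plus a newline, reflecting
 * !kprobes_all_disarmed. 'y'/'n' and 'Y'/'N' are accepted on write as well.
 */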
2808
2809 static int __init debugfs_kprobe_init(void)
2810 {
2811         struct dentry *dir;
2812         unsigned int value = 1;
2813
2814         dir = debugfs_create_dir("kprobes", NULL);
2815
2816         debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
2817
2818         debugfs_create_file("enabled", 0600, dir, &value, &fops_kp);
2819
2820         debugfs_create_file("blacklist", 0400, dir, NULL,
2821                             &kprobe_blacklist_fops);
2822
2823         return 0;
2824 }
2825
2826 late_initcall(debugfs_kprobe_init);
2827 #endif /* CONFIG_DEBUG_FS */