kernel/kprobes.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Kernel Probes (KProbes)
4  *
5  * Copyright (C) IBM Corporation, 2002, 2004
6  *
7  * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
8  *              Probes initial implementation (includes suggestions from
9  *              Rusty Russell).
10  * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
11  *              hlists and exceptions notifier as suggested by Andi Kleen.
12  * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
13  *              interface to access function arguments.
14  * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
15  *              exceptions notifier to be first on the priority list.
16  * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
17  *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
18  *              <prasanna@in.ibm.com> added function-return probes.
19  */
20
21 #define pr_fmt(fmt) "kprobes: " fmt
22
23 #include <linux/kprobes.h>
24 #include <linux/hash.h>
25 #include <linux/init.h>
26 #include <linux/slab.h>
27 #include <linux/stddef.h>
28 #include <linux/export.h>
29 #include <linux/moduleloader.h>
30 #include <linux/kallsyms.h>
31 #include <linux/freezer.h>
32 #include <linux/seq_file.h>
33 #include <linux/debugfs.h>
34 #include <linux/sysctl.h>
35 #include <linux/kdebug.h>
36 #include <linux/memory.h>
37 #include <linux/ftrace.h>
38 #include <linux/cpu.h>
39 #include <linux/jump_label.h>
40 #include <linux/static_call.h>
41 #include <linux/perf_event.h>
42
43 #include <asm/sections.h>
44 #include <asm/cacheflush.h>
45 #include <asm/errno.h>
46 #include <linux/uaccess.h>
47
48 #define KPROBE_HASH_BITS 6
49 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
50
51 #if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
52 #define kprobe_sysctls_init() do { } while (0)
53 #endif
54
55 static int kprobes_initialized;
56 /* 'kprobe_table' can be accessed by either:
57  * - normal hlist traversal and RCU add/del while 'kprobe_mutex' is held,
58  * or
59  * - RCU hlist traversal with preemption disabled (breakpoint handlers).
60  */
61 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
62
63 /* NOTE: change this value only with 'kprobe_mutex' held */
64 static bool kprobes_all_disarmed;
65
66 /* This protects 'kprobe_table' and 'optimizing_list' */
67 static DEFINE_MUTEX(kprobe_mutex);
68 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
69
70 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
71                                         unsigned int __unused)
72 {
73         return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
74 }
75
76 /*
77  * Blacklist -- list of 'struct kprobe_blacklist_entry' describing address
78  * ranges which kprobes cannot probe.
79  */
80 static LIST_HEAD(kprobe_blacklist);
81
82 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
83 /*
84  * 'kprobe::ainsn.insn' points to the copy of the instruction to be
85  * single-stepped. x86_64, POWER4 and above have no-exec support, and
86  * single-stepping an instruction on a vmalloc'd/kmalloc'd/data page
87  * is a recipe for disaster.
88  */
89 struct kprobe_insn_page {
90         struct list_head list;
91         kprobe_opcode_t *insns;         /* Page of instruction slots */
92         struct kprobe_insn_cache *cache;
93         int nused;
94         int ngarbage;
95         char slot_used[];
96 };
97
98 #define KPROBE_INSN_PAGE_SIZE(slots)                    \
99         (offsetof(struct kprobe_insn_page, slot_used) + \
100          (sizeof(char) * (slots)))
101
102 static int slots_per_page(struct kprobe_insn_cache *c)
103 {
104         return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
105 }
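/*
 * For example (figures are illustrative and arch-dependent): on x86-64,
 * where 'kprobe_opcode_t' is one byte and MAX_INSN_SIZE is 16, a 4096-byte
 * page holds 4096 / 16 = 256 instruction slots.
 */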
106
107 enum kprobe_slot_state {
108         SLOT_CLEAN = 0,
109         SLOT_DIRTY = 1,
110         SLOT_USED = 2,
111 };
112
113 void __weak *alloc_insn_page(void)
114 {
115         /*
116          * Use module_alloc() so this page is within +/- 2GB of where the
117          * kernel image and loaded module images reside. This is required
118          * for most of the architectures.
119          * (e.g. x86-64 needs this to handle the %rip-relative fixups.)
120          */
121         return module_alloc(PAGE_SIZE);
122 }
123
124 static void free_insn_page(void *page)
125 {
126         module_memfree(page);
127 }
128
129 struct kprobe_insn_cache kprobe_insn_slots = {
130         .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
131         .alloc = alloc_insn_page,
132         .free = free_insn_page,
133         .sym = KPROBE_INSN_PAGE_SYM,
134         .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
135         .insn_size = MAX_INSN_SIZE,
136         .nr_garbage = 0,
137 };
138 static int collect_garbage_slots(struct kprobe_insn_cache *c);
139
140 /**
141  * __get_insn_slot() - Find a slot on an executable page for an instruction.
142  * @c: instruction slot cache; a new executable page is allocated if there is no room on the existing ones.
143  */
144 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
145 {
146         struct kprobe_insn_page *kip;
147         kprobe_opcode_t *slot = NULL;
148
149         /* Since the slot array is not protected by rcu, we need a mutex */
150         mutex_lock(&c->mutex);
151  retry:
152         rcu_read_lock();
153         list_for_each_entry_rcu(kip, &c->pages, list) {
154                 if (kip->nused < slots_per_page(c)) {
155                         int i;
156
157                         for (i = 0; i < slots_per_page(c); i++) {
158                                 if (kip->slot_used[i] == SLOT_CLEAN) {
159                                         kip->slot_used[i] = SLOT_USED;
160                                         kip->nused++;
161                                         slot = kip->insns + (i * c->insn_size);
162                                         rcu_read_unlock();
163                                         goto out;
164                                 }
165                         }
166                         /* kip->nused is broken. Fix it. */
167                         kip->nused = slots_per_page(c);
168                         WARN_ON(1);
169                 }
170         }
171         rcu_read_unlock();
172
173         /* If there are any garbage slots, collect them and try again. */
174         if (c->nr_garbage && collect_garbage_slots(c) == 0)
175                 goto retry;
176
177         /* All out of space.  Need to allocate a new page. */
178         kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
179         if (!kip)
180                 goto out;
181
182         kip->insns = c->alloc();
183         if (!kip->insns) {
184                 kfree(kip);
185                 goto out;
186         }
187         INIT_LIST_HEAD(&kip->list);
188         memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
189         kip->slot_used[0] = SLOT_USED;
190         kip->nused = 1;
191         kip->ngarbage = 0;
192         kip->cache = c;
193         list_add_rcu(&kip->list, &c->pages);
194         slot = kip->insns;
195
196         /* Record the perf ksymbol register event after adding the page */
197         perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
198                            PAGE_SIZE, false, c->sym);
199 out:
200         mutex_unlock(&c->mutex);
201         return slot;
202 }
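/*
 * Typical usage (a sketch; arch code normally goes through the
 * get_insn_slot()/free_insn_slot() wrappers from <linux/kprobes.h>):
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	...
 *	free_insn_slot(p->ainsn.insn, dirty);
 */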
203
204 /* Clean up one slot. Return true if the slot's page became empty, otherwise false. */
205 static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
206 {
207         kip->slot_used[idx] = SLOT_CLEAN;
208         kip->nused--;
209         if (kip->nused == 0) {
210                 /*
211                  * Page is no longer in use.  Free it unless
212                  * it's the last one.  We keep the last one
213                  * so as not to have to set it up again the
214                  * next time somebody inserts a probe.
215                  */
216                 if (!list_is_singular(&kip->list)) {
217                         /*
218                          * Record perf ksymbol unregister event before removing
219                          * the page.
220                          */
221                         perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
222                                            (unsigned long)kip->insns, PAGE_SIZE, true,
223                                            kip->cache->sym);
224                         list_del_rcu(&kip->list);
225                         synchronize_rcu();
226                         kip->cache->free(kip->insns);
227                         kfree(kip);
228                 }
229                 return true;
230         }
231         return false;
232 }
233
234 static int collect_garbage_slots(struct kprobe_insn_cache *c)
235 {
236         struct kprobe_insn_page *kip, *next;
237
238         /* Ensure nobody is still running in any of the garbage slots. */
239         synchronize_rcu();
240
241         list_for_each_entry_safe(kip, next, &c->pages, list) {
242                 int i;
243
244                 if (kip->ngarbage == 0)
245                         continue;
246                 kip->ngarbage = 0;      /* we will collect all garbage slots */
247                 for (i = 0; i < slots_per_page(c); i++) {
248                         if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
249                                 break;
250                 }
251         }
252         c->nr_garbage = 0;
253         return 0;
254 }
255
256 void __free_insn_slot(struct kprobe_insn_cache *c,
257                       kprobe_opcode_t *slot, int dirty)
258 {
259         struct kprobe_insn_page *kip;
260         long idx;
261
262         mutex_lock(&c->mutex);
263         rcu_read_lock();
264         list_for_each_entry_rcu(kip, &c->pages, list) {
265                 idx = ((long)slot - (long)kip->insns) /
266                         (c->insn_size * sizeof(kprobe_opcode_t));
267                 if (idx >= 0 && idx < slots_per_page(c))
268                         goto out;
269         }
270         /* Could not find this slot. */
271         WARN_ON(1);
272         kip = NULL;
273 out:
274         rcu_read_unlock();
275         /* Mark and sweep: this may sleep */
276         if (kip) {
277                 /* Check double free */
278                 WARN_ON(kip->slot_used[idx] != SLOT_USED);
279                 if (dirty) {
280                         kip->slot_used[idx] = SLOT_DIRTY;
281                         kip->ngarbage++;
282                         if (++c->nr_garbage > slots_per_page(c))
283                                 collect_garbage_slots(c);
284                 } else {
285                         collect_one_slot(kip, idx);
286                 }
287         }
288         mutex_unlock(&c->mutex);
289 }
290
291 /*
292  * Check whether the given address is on a page of kprobe instruction slots.
293  * This is used to check whether an address found on a stack
294  * is in a text area or not.
295  */
296 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
297 {
298         struct kprobe_insn_page *kip;
299         bool ret = false;
300
301         rcu_read_lock();
302         list_for_each_entry_rcu(kip, &c->pages, list) {
303                 if (addr >= (unsigned long)kip->insns &&
304                     addr < (unsigned long)kip->insns + PAGE_SIZE) {
305                         ret = true;
306                         break;
307                 }
308         }
309         rcu_read_unlock();
310
311         return ret;
312 }
313
314 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
315                              unsigned long *value, char *type, char *sym)
316 {
317         struct kprobe_insn_page *kip;
318         int ret = -ERANGE;
319
320         rcu_read_lock();
321         list_for_each_entry_rcu(kip, &c->pages, list) {
322                 if ((*symnum)--)
323                         continue;
324                 strscpy(sym, c->sym, KSYM_NAME_LEN);
325                 *type = 't';
326                 *value = (unsigned long)kip->insns;
327                 ret = 0;
328                 break;
329         }
330         rcu_read_unlock();
331
332         return ret;
333 }
334
335 #ifdef CONFIG_OPTPROBES
336 void __weak *alloc_optinsn_page(void)
337 {
338         return alloc_insn_page();
339 }
340
341 void __weak free_optinsn_page(void *page)
342 {
343         free_insn_page(page);
344 }
345
346 /* For optimized_kprobe buffer */
347 struct kprobe_insn_cache kprobe_optinsn_slots = {
348         .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
349         .alloc = alloc_optinsn_page,
350         .free = free_optinsn_page,
351         .sym = KPROBE_OPTINSN_PAGE_SYM,
352         .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
353         /* .insn_size is initialized later */
354         .nr_garbage = 0,
355 };
356 #endif
357 #endif
358
359 /* We have preemption disabled, so it is safe to use the __ versions. */
360 static inline void set_kprobe_instance(struct kprobe *kp)
361 {
362         __this_cpu_write(kprobe_instance, kp);
363 }
364
365 static inline void reset_kprobe_instance(void)
366 {
367         __this_cpu_write(kprobe_instance, NULL);
368 }
369
370 /*
371  * This routine is called either:
372  *      - under the 'kprobe_mutex' - during kprobe_[un]register().
373  *                              OR
374  *      - with preemption disabled - from architecture specific code.
375  */
376 struct kprobe *get_kprobe(void *addr)
377 {
378         struct hlist_head *head;
379         struct kprobe *p;
380
381         head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
382         hlist_for_each_entry_rcu(p, head, hlist,
383                                  lockdep_is_held(&kprobe_mutex)) {
384                 if (p->addr == addr)
385                         return p;
386         }
387
388         return NULL;
389 }
390 NOKPROBE_SYMBOL(get_kprobe);
391
392 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
393
394 /* Return true if 'p' is an aggregator */
395 static inline bool kprobe_aggrprobe(struct kprobe *p)
396 {
397         return p->pre_handler == aggr_pre_handler;
398 }
399
400 /* Return true if 'p' is unused */
401 static inline bool kprobe_unused(struct kprobe *p)
402 {
403         return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
404                list_empty(&p->list);
405 }
406
407 /* Keep all fields in the kprobe consistent. */
408 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
409 {
410         memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
411         memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
412 }
413
414 #ifdef CONFIG_OPTPROBES
415 /* NOTE: This is protected by 'kprobe_mutex'. */
416 static bool kprobes_allow_optimization;
417
418 /*
419  * Call all 'kprobe::pre_handler' on the list, but ignore their return values.
420  * This must be called from the arch-dependent optimized caller.
421  */
422 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
423 {
424         struct kprobe *kp;
425
426         list_for_each_entry_rcu(kp, &p->list, list) {
427                 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
428                         set_kprobe_instance(kp);
429                         kp->pre_handler(kp, regs);
430                 }
431                 reset_kprobe_instance();
432         }
433 }
434 NOKPROBE_SYMBOL(opt_pre_handler);
435
436 /* Free optimized instructions and optimized_kprobe */
437 static void free_aggr_kprobe(struct kprobe *p)
438 {
439         struct optimized_kprobe *op;
440
441         op = container_of(p, struct optimized_kprobe, kp);
442         arch_remove_optimized_kprobe(op);
443         arch_remove_kprobe(p);
444         kfree(op);
445 }
446
447 /* Return true if the kprobe is ready for optimization. */
448 static inline int kprobe_optready(struct kprobe *p)
449 {
450         struct optimized_kprobe *op;
451
452         if (kprobe_aggrprobe(p)) {
453                 op = container_of(p, struct optimized_kprobe, kp);
454                 return arch_prepared_optinsn(&op->optinsn);
455         }
456
457         return 0;
458 }
459
460 /* Return true if the kprobe is disarmed. Note: p must be on hash list */
461 static inline bool kprobe_disarmed(struct kprobe *p)
462 {
463         struct optimized_kprobe *op;
464
465         /* If this is not an aggr/opt probe, just return whether it is disabled. */
466         if (!kprobe_aggrprobe(p))
467                 return kprobe_disabled(p);
468
469         op = container_of(p, struct optimized_kprobe, kp);
470
471         return kprobe_disabled(p) && list_empty(&op->list);
472 }
473
474 /* Return true if the probe is queued on (un)optimizing lists */
475 static bool kprobe_queued(struct kprobe *p)
476 {
477         struct optimized_kprobe *op;
478
479         if (kprobe_aggrprobe(p)) {
480                 op = container_of(p, struct optimized_kprobe, kp);
481                 if (!list_empty(&op->list))
482                         return true;
483         }
484         return false;
485 }
486
487 /*
488  * Return an optimized kprobe whose optimized code covers the instruction
489  * at 'addr' (excluding the breakpoint at 'addr' itself).
490  */
491 static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
492 {
493         int i;
494         struct kprobe *p = NULL;
495         struct optimized_kprobe *op;
496
497         /* Don't check i == 0, since that is a breakpoint case. */
498         for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
499                 p = get_kprobe(addr - i);
500
501         if (p && kprobe_optready(p)) {
502                 op = container_of(p, struct optimized_kprobe, kp);
503                 if (arch_within_optimized_kprobe(op, addr))
504                         return p;
505         }
506
507         return NULL;
508 }
509
510 /* Optimization staging list, protected by 'kprobe_mutex' */
511 static LIST_HEAD(optimizing_list);
512 static LIST_HEAD(unoptimizing_list);
513 static LIST_HEAD(freeing_list);
514
515 static void kprobe_optimizer(struct work_struct *work);
516 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
517 #define OPTIMIZE_DELAY 5
518
519 /*
520  * Optimize (replace a breakpoint with a jump) kprobes listed on
521  * 'optimizing_list'.
522  */
523 static void do_optimize_kprobes(void)
524 {
525         lockdep_assert_held(&text_mutex);
526         /*
527          * Optimization/unoptimization refers to 'online_cpus' via
528          * stop_machine(), and CPU hotplug modifies 'online_cpus'. At the
529          * same time, 'text_mutex' is held both by CPU hotplug and here.
530          * This combination can cause a deadlock (CPU hotplug tries to lock
531          * 'text_mutex' but stop_machine() cannot finish because
532          * 'online_cpus' has been changed).
533          * To avoid this deadlock, the caller must hold the CPU-hotplug lock
534          * so that CPU hotplug cannot run outside of 'text_mutex' locking.
535          */
536         lockdep_assert_cpus_held();
537
538         /* Optimization is never done while kprobes are all disarmed. */
539         if (kprobes_all_disarmed || !kprobes_allow_optimization ||
540             list_empty(&optimizing_list))
541                 return;
542
543         arch_optimize_kprobes(&optimizing_list);
544 }
545
546 /*
547  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
548  * if needed) kprobes listed on 'unoptimizing_list'.
549  */
550 static void do_unoptimize_kprobes(void)
551 {
552         struct optimized_kprobe *op, *tmp;
553
554         lockdep_assert_held(&text_mutex);
555         /* See comment in do_optimize_kprobes() */
556         lockdep_assert_cpus_held();
557
558         /* Unlike optimization, unoptimization must be done even when disarmed. */
559         if (list_empty(&unoptimizing_list))
560                 return;
561
562         arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
563         /* Loop on 'freeing_list' for disarming */
564         list_for_each_entry_safe(op, tmp, &freeing_list, list) {
565                 /* Switching from the detour code back to the original code */
566                 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
567                 /* Disarm probes if marked disabled */
568                 if (kprobe_disabled(&op->kp))
569                         arch_disarm_kprobe(&op->kp);
570                 if (kprobe_unused(&op->kp)) {
571                         /*
572                          * Remove unused probes from hash list. After waiting
573                          * for synchronization, these probes are reclaimed.
574                          * (reclaiming is done by do_free_cleaned_kprobes().)
575                          */
576                         hlist_del_rcu(&op->kp.hlist);
577                 } else
578                         list_del_init(&op->list);
579         }
580 }
581
582 /* Reclaim all kprobes on the 'freeing_list' */
583 static void do_free_cleaned_kprobes(void)
584 {
585         struct optimized_kprobe *op, *tmp;
586
587         list_for_each_entry_safe(op, tmp, &freeing_list, list) {
588                 list_del_init(&op->list);
589                 if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
590                         /*
591                          * This must not happen, but if there is a kprobe
592                          * still in use, keep it on kprobes hash list.
593                          */
594                         continue;
595                 }
596                 free_aggr_kprobe(&op->kp);
597         }
598 }
599
600 /* Start the optimizer after OPTIMIZE_DELAY has passed. */
601 static void kick_kprobe_optimizer(void)
602 {
603         schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
604 }
605
606 /* Kprobe jump optimizer */
607 static void kprobe_optimizer(struct work_struct *work)
608 {
609         mutex_lock(&kprobe_mutex);
610         cpus_read_lock();
611         mutex_lock(&text_mutex);
612
613         /*
614          * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
615          * kprobes before waiting for the quiescence period.
616          */
617         do_unoptimize_kprobes();
618
619         /*
620          * Step 2: Wait for a quiescence period to ensure all potentially
621          * preempted tasks have been scheduled normally. Because an optprobe
622          * may modify multiple instructions, there is a chance that a task was
623          * preempted in the middle of them and could return into the 2nd-Nth
624          * byte of the jump instruction. This wait avoids that. Note that on
625          * a non-preemptive kernel, this is transparently converted to
626          * synchronize_sched() to wait for all interrupts to have completed.
627          */
628         synchronize_rcu_tasks();
629
630         /* Step 3: Optimize kprobes after the quiescence period. */
631         do_optimize_kprobes();
632
633         /* Step 4: Free cleaned kprobes after the quiescence period. */
634         do_free_cleaned_kprobes();
635
636         mutex_unlock(&text_mutex);
637         cpus_read_unlock();
638
639         /* Step 5: Kick optimizer again if needed */
640         if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
641                 kick_kprobe_optimizer();
642
643         mutex_unlock(&kprobe_mutex);
644 }
645
646 /* Wait for pending optimization and unoptimization to complete. */
647 void wait_for_kprobe_optimizer(void)
648 {
649         mutex_lock(&kprobe_mutex);
650
651         while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
652                 mutex_unlock(&kprobe_mutex);
653
654                 /* This will also make 'optimizing_work' execute immediately */
655                 flush_delayed_work(&optimizing_work);
656                 /* 'optimizing_work' might not have been queued yet, relax */
657                 cpu_relax();
658
659                 mutex_lock(&kprobe_mutex);
660         }
661
662         mutex_unlock(&kprobe_mutex);
663 }
664
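/* Return true if 'op' is already queued on the 'unoptimizing_list'. */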
665 static bool optprobe_queued_unopt(struct optimized_kprobe *op)
666 {
667         struct optimized_kprobe *_op;
668
669         list_for_each_entry(_op, &unoptimizing_list, list) {
670                 if (op == _op)
671                         return true;
672         }
673
674         return false;
675 }
676
677 /* Optimize kprobe if p is ready to be optimized */
678 static void optimize_kprobe(struct kprobe *p)
679 {
680         struct optimized_kprobe *op;
681
682         /* Check if the kprobe is disabled or not ready for optimization. */
683         if (!kprobe_optready(p) || !kprobes_allow_optimization ||
684             (kprobe_disabled(p) || kprobes_all_disarmed))
685                 return;
686
687         /* kprobes with a 'post_handler' cannot be optimized */
688         if (p->post_handler)
689                 return;
690
691         op = container_of(p, struct optimized_kprobe, kp);
692
693         /* Check that there are no other kprobes within the instructions to be optimized. */
694         if (arch_check_optimized_kprobe(op) < 0)
695                 return;
696
697         /* Check if it is already optimized. */
698         if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
699                 if (optprobe_queued_unopt(op)) {
700                         /* This is being unoptimized. Just dequeue the probe. */
701                         list_del_init(&op->list);
702                 }
703                 return;
704         }
705         op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
706
707         /*
708          * While on the 'unoptimizing_list' or 'optimizing_list',
709          * 'op' must have the OPTIMIZED flag set.
710          */
711         if (WARN_ON_ONCE(!list_empty(&op->list)))
712                 return;
713
714         list_add(&op->list, &optimizing_list);
715         kick_kprobe_optimizer();
716 }
717
718 /* Shortcut for direct (forced) unoptimization. */
719 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
720 {
721         lockdep_assert_cpus_held();
722         arch_unoptimize_kprobe(op);
723         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
724 }
725
726 /* Unoptimize a kprobe if p is optimized */
727 static void unoptimize_kprobe(struct kprobe *p, bool force)
728 {
729         struct optimized_kprobe *op;
730
731         if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
732                 return; /* This is neither an optprobe nor optimized */
733
734         op = container_of(p, struct optimized_kprobe, kp);
735         if (!kprobe_optimized(p))
736                 return;
737
738         if (!list_empty(&op->list)) {
739                 if (optprobe_queued_unopt(op)) {
740                         /* Queued in unoptimizing queue */
741                         if (force) {
742                                 /*
743                                  * Forcibly unoptimize the kprobe here, and queue it
744                                  * in the freeing list for release afterwards.
745                                  */
746                                 force_unoptimize_kprobe(op);
747                                 list_move(&op->list, &freeing_list);
748                         }
749                 } else {
750                         /* Dequeue from the optimizing queue */
751                         list_del_init(&op->list);
752                         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
753                 }
754                 return;
755         }
756
757         /* Optimized kprobe case */
758         if (force) {
759                 /* Forcibly update the code: this is a special case */
760                 force_unoptimize_kprobe(op);
761         } else {
762                 list_add(&op->list, &unoptimizing_list);
763                 kick_kprobe_optimizer();
764         }
765 }
766
767 /* Cancel unoptimization so the kprobe can be reused. */
768 static int reuse_unused_kprobe(struct kprobe *ap)
769 {
770         struct optimized_kprobe *op;
771
772         /*
773          * An unused kprobe MUST be in the middle of delayed unoptimization
774          * (meaning there is still a relative jump in place) and disabled.
775          */
776         op = container_of(ap, struct optimized_kprobe, kp);
777         WARN_ON_ONCE(list_empty(&op->list));
778         /* Enable the probe again */
779         ap->flags &= ~KPROBE_FLAG_DISABLED;
780         /* Optimize it again. (remove from 'op->list') */
781         if (!kprobe_optready(ap))
782                 return -EINVAL;
783
784         optimize_kprobe(ap);
785         return 0;
786 }
787
788 /* Remove optimized instructions */
789 static void kill_optimized_kprobe(struct kprobe *p)
790 {
791         struct optimized_kprobe *op;
792
793         op = container_of(p, struct optimized_kprobe, kp);
794         if (!list_empty(&op->list))
795                 /* Dequeue from the (un)optimization queue */
796                 list_del_init(&op->list);
797         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
798
799         if (kprobe_unused(p)) {
800                 /* Enqueue if it is unused */
801                 list_add(&op->list, &freeing_list);
802                 /*
803                  * Remove unused probes from the hash list. After waiting
804                  * for synchronization, this probe is reclaimed.
805                  * (reclaiming is done by do_free_cleaned_kprobes().)
806                  */
807                 hlist_del_rcu(&op->kp.hlist);
808         }
809
810         /* Don't touch the code, because it is already freed. */
811         arch_remove_optimized_kprobe(op);
812 }
813
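/*
 * Prepare optimized instructions, unless the probe is ftrace-based
 * (ftrace-based kprobes cannot be jump-optimized).
 */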
814 static inline
815 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
816 {
817         if (!kprobe_ftrace(p))
818                 arch_prepare_optimized_kprobe(op, p);
819 }
820
821 /* Try to prepare optimized instructions */
822 static void prepare_optimized_kprobe(struct kprobe *p)
823 {
824         struct optimized_kprobe *op;
825
826         op = container_of(p, struct optimized_kprobe, kp);
827         __prepare_optimized_kprobe(op, p);
828 }
829
830 /* Allocate new optimized_kprobe and try to prepare optimized instructions. */
831 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
832 {
833         struct optimized_kprobe *op;
834
835         op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
836         if (!op)
837                 return NULL;
838
839         INIT_LIST_HEAD(&op->list);
840         op->kp.addr = p->addr;
841         __prepare_optimized_kprobe(op, p);
842
843         return &op->kp;
844 }
845
846 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
847
848 /*
849  * Prepare an optimized_kprobe and optimize it.
850  * NOTE: 'p' must be a normal registered kprobe.
851  */
852 static void try_to_optimize_kprobe(struct kprobe *p)
853 {
854         struct kprobe *ap;
855         struct optimized_kprobe *op;
856
857         /* Impossible to optimize ftrace-based kprobe. */
858         if (kprobe_ftrace(p))
859                 return;
860
861         /* For preparing optimization, jump_label_text_reserved() is called. */
862         cpus_read_lock();
863         jump_label_lock();
864         mutex_lock(&text_mutex);
865
866         ap = alloc_aggr_kprobe(p);
867         if (!ap)
868                 goto out;
869
870         op = container_of(ap, struct optimized_kprobe, kp);
871         if (!arch_prepared_optinsn(&op->optinsn)) {
872                 /* If preparing the optimized code failed, fall back to a normal kprobe. */
873                 arch_remove_optimized_kprobe(op);
874                 kfree(op);
875                 goto out;
876         }
877
878         init_aggr_kprobe(ap, p);
879         optimize_kprobe(ap);    /* This just kicks optimizer thread. */
880
881 out:
882         mutex_unlock(&text_mutex);
883         jump_label_unlock();
884         cpus_read_unlock();
885 }
886
887 static void optimize_all_kprobes(void)
888 {
889         struct hlist_head *head;
890         struct kprobe *p;
891         unsigned int i;
892
893         mutex_lock(&kprobe_mutex);
894         /* If optimization is already allowed, just return. */
895         if (kprobes_allow_optimization)
896                 goto out;
897
898         cpus_read_lock();
899         kprobes_allow_optimization = true;
900         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
901                 head = &kprobe_table[i];
902                 hlist_for_each_entry(p, head, hlist)
903                         if (!kprobe_disabled(p))
904                                 optimize_kprobe(p);
905         }
906         cpus_read_unlock();
907         pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
908 out:
909         mutex_unlock(&kprobe_mutex);
910 }
911
912 #ifdef CONFIG_SYSCTL
913 static void unoptimize_all_kprobes(void)
914 {
915         struct hlist_head *head;
916         struct kprobe *p;
917         unsigned int i;
918
919         mutex_lock(&kprobe_mutex);
920         /* If optimization is already prohibited, just return. */
921         if (!kprobes_allow_optimization) {
922                 mutex_unlock(&kprobe_mutex);
923                 return;
924         }
925
926         cpus_read_lock();
927         kprobes_allow_optimization = false;
928         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
929                 head = &kprobe_table[i];
930                 hlist_for_each_entry(p, head, hlist) {
931                         if (!kprobe_disabled(p))
932                                 unoptimize_kprobe(p, false);
933                 }
934         }
935         cpus_read_unlock();
936         mutex_unlock(&kprobe_mutex);
937
938         /* Wait for unoptimizing completion. */
939         wait_for_kprobe_optimizer();
940         pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
941 }
942
943 static DEFINE_MUTEX(kprobe_sysctl_mutex);
944 static int sysctl_kprobes_optimization;
945 static int proc_kprobes_optimization_handler(struct ctl_table *table,
946                                              int write, void *buffer,
947                                              size_t *length, loff_t *ppos)
948 {
949         int ret;
950
951         mutex_lock(&kprobe_sysctl_mutex);
952         sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
953         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
954
955         if (sysctl_kprobes_optimization)
956                 optimize_all_kprobes();
957         else
958                 unoptimize_all_kprobes();
959         mutex_unlock(&kprobe_sysctl_mutex);
960
961         return ret;
962 }
963
964 static struct ctl_table kprobe_sysctls[] = {
965         {
966                 .procname       = "kprobes-optimization",
967                 .data           = &sysctl_kprobes_optimization,
968                 .maxlen         = sizeof(int),
969                 .mode           = 0644,
970                 .proc_handler   = proc_kprobes_optimization_handler,
971                 .extra1         = SYSCTL_ZERO,
972                 .extra2         = SYSCTL_ONE,
973         },
974         {}
975 };
976
977 static void __init kprobe_sysctls_init(void)
978 {
979         register_sysctl_init("debug", kprobe_sysctls);
980 }
981 #endif /* CONFIG_SYSCTL */
982
983 /* Put a breakpoint for a probe. */
984 static void __arm_kprobe(struct kprobe *p)
985 {
986         struct kprobe *_p;
987
988         lockdep_assert_held(&text_mutex);
989
990         /* Find an overlapping optimized kprobe. */
991         _p = get_optimized_kprobe(p->addr);
992         if (unlikely(_p))
993                 /* Fallback to unoptimized kprobe */
994                 unoptimize_kprobe(_p, true);
995
996         arch_arm_kprobe(p);
997         optimize_kprobe(p);     /* Try to optimize (add kprobe to a list) */
998 }
999
1000 /* Remove the breakpoint of a probe. */
1001 static void __disarm_kprobe(struct kprobe *p, bool reopt)
1002 {
1003         struct kprobe *_p;
1004
1005         lockdep_assert_held(&text_mutex);
1006
1007         /* Try to unoptimize */
1008         unoptimize_kprobe(p, kprobes_all_disarmed);
1009
1010         if (!kprobe_queued(p)) {
1011                 arch_disarm_kprobe(p);
1012                 /* If another kprobe was blocked, re-optimize it. */
1013                 _p = get_optimized_kprobe(p->addr);
1014                 if (unlikely(_p) && reopt)
1015                         optimize_kprobe(_p);
1016         }
1017         /*
1018          * TODO: Since unoptimization and the real disarming will be done by
1019          * the worker thread, we cannot check here whether other probes were
1020          * unoptimized because of this probe. They should be re-optimized
1021          * by the worker thread.
1022          */
1023 }
1024
1025 #else /* !CONFIG_OPTPROBES */
1026
1027 #define optimize_kprobe(p)                      do {} while (0)
1028 #define unoptimize_kprobe(p, f)                 do {} while (0)
1029 #define kill_optimized_kprobe(p)                do {} while (0)
1030 #define prepare_optimized_kprobe(p)             do {} while (0)
1031 #define try_to_optimize_kprobe(p)               do {} while (0)
1032 #define __arm_kprobe(p)                         arch_arm_kprobe(p)
1033 #define __disarm_kprobe(p, o)                   arch_disarm_kprobe(p)
1034 #define kprobe_disarmed(p)                      kprobe_disabled(p)
1035 #define wait_for_kprobe_optimizer()             do {} while (0)
1036
1037 static int reuse_unused_kprobe(struct kprobe *ap)
1038 {
1039         /*
1040          * If optimized kprobes are NOT supported, the aggregator kprobe is
1041          * released at the same time that the last aggregated kprobe is
1042          * unregistered.
1043          * Thus there should be no chance to reuse an unused kprobe.
1044          */
1045         WARN_ON_ONCE(1);
1046         return -EINVAL;
1047 }
1048
1049 static void free_aggr_kprobe(struct kprobe *p)
1050 {
1051         arch_remove_kprobe(p);
1052         kfree(p);
1053 }
1054
1055 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
1056 {
1057         return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
1058 }
1059 #endif /* CONFIG_OPTPROBES */
1060
1061 #ifdef CONFIG_KPROBES_ON_FTRACE
1062 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
1063         .func = kprobe_ftrace_handler,
1064         .flags = FTRACE_OPS_FL_SAVE_REGS,
1065 };
1066
1067 static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
1068         .func = kprobe_ftrace_handler,
1069         .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
1070 };
1071
1072 static int kprobe_ipmodify_enabled;
1073 static int kprobe_ftrace_enabled;
1074
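/*
 * Add 'p->addr' to the ftrace filter of 'ops' and register 'ops' with
 * ftrace when its first probe is armed ('*cnt' counts armed probes).
 */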
1075 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1076                                int *cnt)
1077 {
1078         int ret = 0;
1079
1080         lockdep_assert_held(&kprobe_mutex);
1081
1082         ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
1083         if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
1084                 return ret;
1085
1086         if (*cnt == 0) {
1087                 ret = register_ftrace_function(ops);
1088                 if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
1089                         goto err_ftrace;
1090         }
1091
1092         (*cnt)++;
1093         return ret;
1094
1095 err_ftrace:
1096         /*
1097          * At this point, since ops is not registered, we should be safe from
1098          * registering an empty filter.
1099          */
1100         ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1101         return ret;
1102 }
1103
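/*
 * Probes with a post_handler need to modify the instruction pointer, so
 * they use the IPMODIFY ftrace_ops; all others share the plain one.
 */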
1104 static int arm_kprobe_ftrace(struct kprobe *p)
1105 {
1106         bool ipmodify = (p->post_handler != NULL);
1107
1108         return __arm_kprobe_ftrace(p,
1109                 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1110                 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1111 }
1112
1113 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1114                                   int *cnt)
1115 {
1116         int ret = 0;
1117
1118         lockdep_assert_held(&kprobe_mutex);
1119
1120         if (*cnt == 1) {
1121                 ret = unregister_ftrace_function(ops);
1122                 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
1123                         return ret;
1124         }
1125
1126         (*cnt)--;
1127
1128         ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1129         WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
1130                   p->addr, ret);
1131         return ret;
1132 }
1133
1134 static int disarm_kprobe_ftrace(struct kprobe *p)
1135 {
1136         bool ipmodify = (p->post_handler != NULL);
1137
1138         return __disarm_kprobe_ftrace(p,
1139                 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1140                 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1141 }
1142 #else   /* !CONFIG_KPROBES_ON_FTRACE */
1143 static inline int arm_kprobe_ftrace(struct kprobe *p)
1144 {
1145         return -ENODEV;
1146 }
1147
1148 static inline int disarm_kprobe_ftrace(struct kprobe *p)
1149 {
1150         return -ENODEV;
1151 }
1152 #endif
1153
1154 static int prepare_kprobe(struct kprobe *p)
1155 {
1156         /* Must ensure p->addr is really on ftrace */
1157         if (kprobe_ftrace(p))
1158                 return arch_prepare_kprobe_ftrace(p);
1159
1160         return arch_prepare_kprobe(p);
1161 }
1162
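/*
 * Arm 'kp': ftrace-based probes go through ftrace; all others get their
 * breakpoint written under 'text_mutex' with CPU hotplug held off.
 */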
1163 static int arm_kprobe(struct kprobe *kp)
1164 {
1165         if (unlikely(kprobe_ftrace(kp)))
1166                 return arm_kprobe_ftrace(kp);
1167
1168         cpus_read_lock();
1169         mutex_lock(&text_mutex);
1170         __arm_kprobe(kp);
1171         mutex_unlock(&text_mutex);
1172         cpus_read_unlock();
1173
1174         return 0;
1175 }
1176
1177 static int disarm_kprobe(struct kprobe *kp, bool reopt)
1178 {
1179         if (unlikely(kprobe_ftrace(kp)))
1180                 return disarm_kprobe_ftrace(kp);
1181
1182         cpus_read_lock();
1183         mutex_lock(&text_mutex);
1184         __disarm_kprobe(kp, reopt);
1185         mutex_unlock(&text_mutex);
1186         cpus_read_unlock();
1187
1188         return 0;
1189 }
1190
1191 /*
1192  * Aggregate handlers for multiple kprobes support - these handlers
1193  * take care of invoking the individual kprobe handlers on p->list
1194  */
1195 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1196 {
1197         struct kprobe *kp;
1198
1199         list_for_each_entry_rcu(kp, &p->list, list) {
1200                 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1201                         set_kprobe_instance(kp);
1202                         if (kp->pre_handler(kp, regs))
1203                                 return 1;
1204                 }
1205                 reset_kprobe_instance();
1206         }
1207         return 0;
1208 }
1209 NOKPROBE_SYMBOL(aggr_pre_handler);
1210
1211 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1212                               unsigned long flags)
1213 {
1214         struct kprobe *kp;
1215
1216         list_for_each_entry_rcu(kp, &p->list, list) {
1217                 if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1218                         set_kprobe_instance(kp);
1219                         kp->post_handler(kp, regs, flags);
1220                         reset_kprobe_instance();
1221                 }
1222         }
1223 }
1224 NOKPROBE_SYMBOL(aggr_post_handler);
1225
1226 /* Increment 'nmissed' for 'p', or for each of its child probes if 'p' is an aggregator. */
1227 void kprobes_inc_nmissed_count(struct kprobe *p)
1228 {
1229         struct kprobe *kp;
1230
1231         if (!kprobe_aggrprobe(p)) {
1232                 p->nmissed++;
1233         } else {
1234                 list_for_each_entry_rcu(kp, &p->list, list)
1235                         kp->nmissed++;
1236         }
1237 }
1238 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1239
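/* RCU callback: free a kretprobe_instance and drop its holder's refcount. */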
1240 static void free_rp_inst_rcu(struct rcu_head *head)
1241 {
1242         struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
1243
1244         if (refcount_dec_and_test(&ri->rph->ref))
1245                 kfree(ri->rph);
1246         kfree(ri);
1247 }
1248 NOKPROBE_SYMBOL(free_rp_inst_rcu);
1249
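/*
 * Return the instance to its kretprobe's freelist, or free it via RCU if
 * the kretprobe has already gone away.
 */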
1250 static void recycle_rp_inst(struct kretprobe_instance *ri)
1251 {
1252         struct kretprobe *rp = get_kretprobe(ri);
1253
1254         if (likely(rp))
1255                 freelist_add(&ri->freelist, &rp->freelist);
1256         else
1257                 call_rcu(&ri->rcu, free_rp_inst_rcu);
1258 }
1259 NOKPROBE_SYMBOL(recycle_rp_inst);
1260
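/*
 * A dummy kprobe: while it is set as the per-CPU 'current_kprobe', other
 * kprobes hitting on this CPU are treated as reentrant and skipped (see
 * kprobe_busy_begin()/kprobe_busy_end()).
 */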
1261 static struct kprobe kprobe_busy = {
1262         .addr = (void *) get_kprobe,
1263 };
1264
1265 void kprobe_busy_begin(void)
1266 {
1267         struct kprobe_ctlblk *kcb;
1268
1269         preempt_disable();
1270         __this_cpu_write(current_kprobe, &kprobe_busy);
1271         kcb = get_kprobe_ctlblk();
1272         kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1273 }
1274
1275 void kprobe_busy_end(void)
1276 {
1277         __this_cpu_write(current_kprobe, NULL);
1278         preempt_enable();
1279 }
1280
1281 /*
1282  * This function is called from delayed_put_task_struct() when a task is
1283  * dead and cleaned up to recycle any kretprobe instances associated with
1284  * this task. These left over instances represent probed functions that
1285  * have been called but will never return.
1286  */
1287 void kprobe_flush_task(struct task_struct *tk)
1288 {
1289         struct kretprobe_instance *ri;
1290         struct llist_node *node;
1291
1292         /* Early boot, not yet initialized. */
1293         if (unlikely(!kprobes_initialized))
1294                 return;
1295
1296         kprobe_busy_begin();
1297
1298         node = __llist_del_all(&tk->kretprobe_instances);
1299         while (node) {
1300                 ri = container_of(node, struct kretprobe_instance, llist);
1301                 node = node->next;
1302
1303                 recycle_rp_inst(ri);
1304         }
1305
1306         kprobe_busy_end();
1307 }
1308 NOKPROBE_SYMBOL(kprobe_flush_task);
1309
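/*
 * Free every kretprobe_instance left on 'rp's freelist and drop the
 * corresponding refcounts on the instance holder.
 */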
1310 static inline void free_rp_inst(struct kretprobe *rp)
1311 {
1312         struct kretprobe_instance *ri;
1313         struct freelist_node *node;
1314         int count = 0;
1315
1316         node = rp->freelist.head;
1317         while (node) {
1318                 ri = container_of(node, struct kretprobe_instance, freelist);
1319                 node = node->next;
1320
1321                 kfree(ri);
1322                 count++;
1323         }
1324
1325         if (refcount_sub_and_test(count, &rp->rph->ref)) {
1326                 kfree(rp->rph);
1327                 rp->rph = NULL;
1328         }
1329 }
1330
1331 /* Add the new probe to 'ap->list'. */
1332 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1333 {
1334         if (p->post_handler)
1335                 unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */
1336
1337         list_add_rcu(&p->list, &ap->list);
1338         if (p->post_handler && !ap->post_handler)
1339                 ap->post_handler = aggr_post_handler;
1340
1341         return 0;
1342 }
1343
1344 /*
1345  * Fill in the required fields of the aggregator kprobe. Replace the
1346  * earlier kprobe in the hlist with the aggregator kprobe.
1347  */
1348 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1349 {
1350         /* Copy the insn slot of 'p' to 'ap'. */
1351         copy_kprobe(p, ap);
1352         flush_insn_slot(ap);
1353         ap->addr = p->addr;
1354         ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1355         ap->pre_handler = aggr_pre_handler;
1356         /* We don't care about a kprobe which has already gone away. */
1357         if (p->post_handler && !kprobe_gone(p))
1358                 ap->post_handler = aggr_post_handler;
1359
1360         INIT_LIST_HEAD(&ap->list);
1361         INIT_HLIST_NODE(&ap->hlist);
1362
1363         list_add_rcu(&p->list, &ap->list);
1364         hlist_replace_rcu(&p->hlist, &ap->hlist);
1365 }
1366
1367 /*
1368  * This registers the second or subsequent kprobe at the same address.
1369  */
1370 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1371 {
1372         int ret = 0;
1373         struct kprobe *ap = orig_p;
1374
1375         cpus_read_lock();
1376
1377         /* For preparing optimization, jump_label_text_reserved() is called */
1378         jump_label_lock();
1379         mutex_lock(&text_mutex);
1380
1381         if (!kprobe_aggrprobe(orig_p)) {
1382                 /* If 'orig_p' is not an 'aggr_kprobe', create new one. */
1383                 ap = alloc_aggr_kprobe(orig_p);
1384                 if (!ap) {
1385                         ret = -ENOMEM;
1386                         goto out;
1387                 }
1388                 init_aggr_kprobe(ap, orig_p);
1389         } else if (kprobe_unused(ap)) {
1390                 /* This probe is going to die. Rescue it */
1391                 ret = reuse_unused_kprobe(ap);
1392                 if (ret)
1393                         goto out;
1394         }
1395
1396         if (kprobe_gone(ap)) {
1397                 /*
1398                  * We are attempting to insert a new probe at the same location
1399                  * as a previous probe in a module vaddr area which has already
1400                  * been freed. So the instruction slot has already been
1401                  * released, and we need a new slot for the new probe.
1402                  */
1403                 ret = arch_prepare_kprobe(ap);
1404                 if (ret)
1405                         /*
1406                          * Even if allocating a new slot fails, there is no need
1407                          * to free 'ap'. It will be used next time, or
1408                          * freed by unregister_kprobe().
1409                          */
1410                         goto out;
1411
1412                 /* Prepare optimized instructions if possible. */
1413                 prepare_optimized_kprobe(ap);
1414
1415                 /*
1416                  * Clear gone flag to prevent allocating new slot again, and
1417                  * set disabled flag because it is not armed yet.
1418                  */
1419                 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1420                             | KPROBE_FLAG_DISABLED;
1421         }
1422
1423         /* Copy the insn slot of 'p' to 'ap'. */
1424         copy_kprobe(ap, p);
1425         ret = add_new_kprobe(ap, p);
1426
1427 out:
1428         mutex_unlock(&text_mutex);
1429         jump_label_unlock();
1430         cpus_read_unlock();
1431
1432         if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1433                 ap->flags &= ~KPROBE_FLAG_DISABLED;
1434                 if (!kprobes_all_disarmed) {
1435                         /* Arm the breakpoint again. */
1436                         ret = arm_kprobe(ap);
1437                         if (ret) {
1438                                 ap->flags |= KPROBE_FLAG_DISABLED;
1439                                 list_del_rcu(&p->list);
1440                                 synchronize_rcu();
1441                         }
1442                 }
1443         }
1444         return ret;
1445 }
1446
1447 bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1448 {
1449         /* The '__kprobes' functions and entry code must not be probed. */
1450         return addr >= (unsigned long)__kprobes_text_start &&
1451                addr < (unsigned long)__kprobes_text_end;
1452 }
1453
1454 static bool __within_kprobe_blacklist(unsigned long addr)
1455 {
1456         struct kprobe_blacklist_entry *ent;
1457
1458         if (arch_within_kprobe_blacklist(addr))
1459                 return true;
1460         /*
1461          * If 'kprobe_blacklist' is defined, check the address and
1462          * reject any probe registration in the prohibited area.
1463          */
1464         list_for_each_entry(ent, &kprobe_blacklist, list) {
1465                 if (addr >= ent->start_addr && addr < ent->end_addr)
1466                         return true;
1467         }
1468         return false;
1469 }
1470
1471 bool within_kprobe_blacklist(unsigned long addr)
1472 {
1473         char symname[KSYM_NAME_LEN], *p;
1474
1475         if (__within_kprobe_blacklist(addr))
1476                 return true;
1477
1478         /* Check if the address is on a suffixed symbol (e.g. '<symbol>.constprop.0'). */
1479         if (!lookup_symbol_name(addr, symname)) {
1480                 p = strchr(symname, '.');
1481                 if (!p)
1482                         return false;
1483                 *p = '\0';
1484                 addr = (unsigned long)kprobe_lookup_name(symname, 0);
1485                 if (addr)
1486                         return __within_kprobe_blacklist(addr);
1487         }
1488         return false;
1489 }
1490
1491 /*
1492  * arch_adjust_kprobe_addr - adjust the probe address for the architecture
1493  * @addr: symbol base address
1494  * @offset: offset within the symbol
1495  * @on_func_entry: set to true if @addr + @offset is on the function entry
1496  *
1497  * Typically returns @addr + @offset, except for special cases where the
1498  * function might be prefixed by a CFI landing pad, in that case any offset
1499  * inside the landing pad is mapped to the first 'real' instruction of the
1500  * symbol.
1501  *
1502  * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C
1503  * instruction at +0.
1504  */
1505 kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
1506                                                 unsigned long offset,
1507                                                 bool *on_func_entry)
1508 {
1509         *on_func_entry = !offset;
1510         return (kprobe_opcode_t *)(addr + offset);
1511 }
1512
1513 /*
1514  * If 'symbol_name' is specified, look it up and add the 'offset'
1515  * to it. This way, we can specify a relative address to a symbol.
1516  * This returns an encoded error if it fails to look up the symbol, or if
1517  * an invalid combination of parameters is given.
1518  */
1519 static kprobe_opcode_t *
1520 _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
1521              unsigned long offset, bool *on_func_entry)
1522 {
1523         if ((symbol_name && addr) || (!symbol_name && !addr))
1524                 goto invalid;
1525
1526         if (symbol_name) {
1527                 /*
1528                  * Input: @sym + @offset
1529                  * Output: @addr + @offset
1530                  *
1531                  * NOTE: kprobe_lookup_name() does *NOT* fold the offset
1532                  *       argument into its output!
1533                  */
1534                 addr = kprobe_lookup_name(symbol_name, offset);
1535                 if (!addr)
1536                         return ERR_PTR(-ENOENT);
1537         }
1538
1539         /*
1540          * So here we have @addr + @offset, displace it into a new
1541          * @addr' + @offset' where @addr' is the symbol start address.
1542          */
1543         addr = (void *)addr + offset;
1544         if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset))
1545                 return ERR_PTR(-ENOENT);
1546         addr = (void *)addr - offset;
1547
1548         /*
1549          * Then ask the architecture to re-combine them, taking care of
1550          * magical function entry details while telling us if this was indeed
1551          * at the start of the function.
1552          */
1553         addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
1554         if (addr)
1555                 return addr;
1556
1557 invalid:
1558         return ERR_PTR(-EINVAL);
1559 }
1560
1561 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1562 {
1563         bool on_func_entry;
1564         return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
1565 }
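/*
 * For example (a sketch, not taken from this file), a user of the kprobes
 * API sets either '.addr' or '.symbol_name' (optionally plus '.offset')
 * before registering; the target name below is hypothetical:
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_sys_openat2",
 *		.offset		= 0,
 *	};
 *	...
 *	int ret = register_kprobe(&kp);
 */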
1566
1567 /*
1568  * Check that 'p' is a valid registered kprobe and return the aggregator
1569  * kprobe at the same address.
1570  */
1571 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1572 {
1573         struct kprobe *ap, *list_p;
1574
1575         lockdep_assert_held(&kprobe_mutex);
1576
1577         ap = get_kprobe(p->addr);
1578         if (unlikely(!ap))
1579                 return NULL;
1580
1581         if (p != ap) {
1582                 list_for_each_entry(list_p, &ap->list, list)
1583                         if (list_p == p)
1584                                 /* kprobe 'p' is a valid probe */
1585                                 goto valid;
1586                 return NULL;
1587         }
1588 valid:
1589         return ap;
1590 }
1591
1592 /*
1593  * Warn and return an error if the kprobe is being re-registered, since
1594  * that indicates a software bug.
1595  */
1596 static inline int warn_kprobe_rereg(struct kprobe *p)
1597 {
1598         int ret = 0;
1599
1600         mutex_lock(&kprobe_mutex);
1601         if (WARN_ON_ONCE(__get_valid_kprobe(p)))
1602                 ret = -EINVAL;
1603         mutex_unlock(&kprobe_mutex);
1604
1605         return ret;
1606 }
1607
1608 static int check_ftrace_location(struct kprobe *p)
1609 {
1610         unsigned long addr = (unsigned long)p->addr;
1611
1612         if (ftrace_location(addr) == addr) {
1613 #ifdef CONFIG_KPROBES_ON_FTRACE
1614                 p->flags |= KPROBE_FLAG_FTRACE;
1615 #else   /* !CONFIG_KPROBES_ON_FTRACE */
1616                 return -EINVAL;
1617 #endif
1618         }
1619         return 0;
1620 }
1621
1622 static int check_kprobe_address_safe(struct kprobe *p,
1623                                      struct module **probed_mod)
1624 {
1625         int ret;
1626
1627         ret = check_ftrace_location(p);
1628         if (ret)
1629                 return ret;
1630         jump_label_lock();
1631         preempt_disable();
1632
1633         /* Ensure the address is not in a reserved area and is within kernel text. */
1634         if (!kernel_text_address((unsigned long) p->addr) ||
1635             within_kprobe_blacklist((unsigned long) p->addr) ||
1636             jump_label_text_reserved(p->addr, p->addr) ||
1637             static_call_text_reserved(p->addr, p->addr) ||
1638             find_bug((unsigned long)p->addr)) {
1639                 ret = -EINVAL;
1640                 goto out;
1641         }
1642
1643         /* Check if 'p' is probing a module. */
1644         *probed_mod = __module_text_address((unsigned long) p->addr);
1645         if (*probed_mod) {
1646                 /*
1647                  * We must hold a refcount of the probed module while updating
1648                  * its code to prohibit unexpected unloading.
1649                  */
1650                 if (unlikely(!try_module_get(*probed_mod))) {
1651                         ret = -ENOENT;
1652                         goto out;
1653                 }
1654
1655                 /*
1656                  * If the module has already freed its '.init.text', we cannot
1657                  * insert kprobes there.
1658                  */
1659                 if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1660                     (*probed_mod)->state != MODULE_STATE_COMING) {
1661                         module_put(*probed_mod);
1662                         *probed_mod = NULL;
1663                         ret = -ENOENT;
1664                 }
1665         }
1666 out:
1667         preempt_enable();
1668         jump_label_unlock();
1669
1670         return ret;
1671 }
1672
1673 int register_kprobe(struct kprobe *p)
1674 {
1675         int ret;
1676         struct kprobe *old_p;
1677         struct module *probed_mod;
1678         kprobe_opcode_t *addr;
1679
1680         /* Adjust probe address from symbol */
1681         addr = kprobe_addr(p);
1682         if (IS_ERR(addr))
1683                 return PTR_ERR(addr);
1684         p->addr = addr;
1685
1686         ret = warn_kprobe_rereg(p);
1687         if (ret)
1688                 return ret;
1689
1690         /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1691         p->flags &= KPROBE_FLAG_DISABLED;
1692         p->nmissed = 0;
1693         INIT_LIST_HEAD(&p->list);
1694
1695         ret = check_kprobe_address_safe(p, &probed_mod);
1696         if (ret)
1697                 return ret;
1698
1699         mutex_lock(&kprobe_mutex);
1700
1701         old_p = get_kprobe(p->addr);
1702         if (old_p) {
1703                 /* Since this may unoptimize 'old_p', 'text_mutex' is locked inside. */
1704                 ret = register_aggr_kprobe(old_p, p);
1705                 goto out;
1706         }
1707
1708         cpus_read_lock();
1709         /* Prevent text modification */
1710         mutex_lock(&text_mutex);
1711         ret = prepare_kprobe(p);
1712         mutex_unlock(&text_mutex);
1713         cpus_read_unlock();
1714         if (ret)
1715                 goto out;
1716
1717         INIT_HLIST_NODE(&p->hlist);
1718         hlist_add_head_rcu(&p->hlist,
1719                        &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1720
1721         if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1722                 ret = arm_kprobe(p);
1723                 if (ret) {
1724                         hlist_del_rcu(&p->hlist);
1725                         synchronize_rcu();
1726                         goto out;
1727                 }
1728         }
1729
1730         /* Try to optimize kprobe */
1731         try_to_optimize_kprobe(p);
1732 out:
1733         mutex_unlock(&kprobe_mutex);
1734
1735         if (probed_mod)
1736                 module_put(probed_mod);
1737
1738         return ret;
1739 }
1740 EXPORT_SYMBOL_GPL(register_kprobe);
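
/*
 * Illustrative sketch (guarded out, in the spirit of
 * samples/kprobes/kprobe_example.c): a minimal module registering one kprobe.
 * The probed symbol is only an example.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("kprobe hit at %pS\n", p->addr);
        return 0;       /* continue with the probed instruction */
}

static struct kprobe example_kp = {
        .symbol_name = "kernel_clone",          /* example target */
        .pre_handler = example_pre_handler,
};

static int __init example_kprobe_init(void)
{
        return register_kprobe(&example_kp);
}

static void __exit example_kprobe_exit(void)
{
        unregister_kprobe(&example_kp);
}

module_init(example_kprobe_init);
module_exit(example_kprobe_exit);
MODULE_LICENSE("GPL");
#endif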
1741
1742 /* Check if all probes on the 'ap' are disabled. */
1743 static bool aggr_kprobe_disabled(struct kprobe *ap)
1744 {
1745         struct kprobe *kp;
1746
1747         lockdep_assert_held(&kprobe_mutex);
1748
1749         list_for_each_entry(kp, &ap->list, list)
1750                 if (!kprobe_disabled(kp))
1751                         /*
1752                          * Since there is an active probe on the list,
1753                          * we can't disable this 'ap'.
1754                          */
1755                         return false;
1756
1757         return true;
1758 }
1759
1760 static struct kprobe *__disable_kprobe(struct kprobe *p)
1761 {
1762         struct kprobe *orig_p;
1763         int ret;
1764
1765         lockdep_assert_held(&kprobe_mutex);
1766
1767         /* Get an original kprobe for return */
1768         orig_p = __get_valid_kprobe(p);
1769         if (unlikely(orig_p == NULL))
1770                 return ERR_PTR(-EINVAL);
1771
1772         if (!kprobe_disabled(p)) {
1773                 /* Disable probe if it is a child probe */
1774                 if (p != orig_p)
1775                         p->flags |= KPROBE_FLAG_DISABLED;
1776
1777                 /* Try to disarm and disable this/parent probe */
1778                 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1779                         /*
1780                          * If 'kprobes_all_disarmed' is set, 'orig_p'
1781                          * should have already been disarmed, so
1782                          * skip the unneeded disarming process.
1783                          */
1784                         if (!kprobes_all_disarmed) {
1785                                 ret = disarm_kprobe(orig_p, true);
1786                                 if (ret) {
1787                                         p->flags &= ~KPROBE_FLAG_DISABLED;
1788                                         return ERR_PTR(ret);
1789                                 }
1790                         }
1791                         orig_p->flags |= KPROBE_FLAG_DISABLED;
1792                 }
1793         }
1794
1795         return orig_p;
1796 }
1797
1798 /*
1799  * Unregister a kprobe without scheduler synchronization.
1800  */
1801 static int __unregister_kprobe_top(struct kprobe *p)
1802 {
1803         struct kprobe *ap, *list_p;
1804
1805         /* Disable kprobe. This will disarm it if needed. */
1806         ap = __disable_kprobe(p);
1807         if (IS_ERR(ap))
1808                 return PTR_ERR(ap);
1809
1810         if (ap == p)
1811                 /*
1812                  * This probe is an independent (and non-optimized) kprobe
1813                  * (not an aggrprobe). Remove from the hash list.
1814                  */
1815                 goto disarmed;
1816
1817         /* Following process expects this probe is an aggrprobe */
1818         WARN_ON(!kprobe_aggrprobe(ap));
1819
1820         if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1821                 /*
1822                  * !disarmed can happen if the probe is under delayed
1823                  * unoptimizing.
1824                  */
1825                 goto disarmed;
1826         else {
1827                 /* If the probe being removed has special handlers, update the aggrprobe */
1828                 if (p->post_handler && !kprobe_gone(p)) {
1829                         list_for_each_entry(list_p, &ap->list, list) {
1830                                 if ((list_p != p) && (list_p->post_handler))
1831                                         goto noclean;
1832                         }
1833                         ap->post_handler = NULL;
1834                 }
1835 noclean:
1836                 /*
1837                  * Remove from the aggrprobe: this path will do nothing in
1838                  * __unregister_kprobe_bottom().
1839                  */
1840                 list_del_rcu(&p->list);
1841                 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1842                         /*
1843                          * Try to optimize this probe again, because post
1844                          * handler may have been changed.
1845                          */
1846                         optimize_kprobe(ap);
1847         }
1848         return 0;
1849
1850 disarmed:
1851         hlist_del_rcu(&ap->hlist);
1852         return 0;
1853 }
1854
1855 static void __unregister_kprobe_bottom(struct kprobe *p)
1856 {
1857         struct kprobe *ap;
1858
1859         if (list_empty(&p->list))
1860                 /* This is an independent kprobe */
1861                 arch_remove_kprobe(p);
1862         else if (list_is_singular(&p->list)) {
1863                 /* This is the last child of an aggrprobe */
1864                 ap = list_entry(p->list.next, struct kprobe, list);
1865                 list_del(&p->list);
1866                 free_aggr_kprobe(ap);
1867         }
1868         /* Otherwise, do nothing. */
1869 }
1870
1871 int register_kprobes(struct kprobe **kps, int num)
1872 {
1873         int i, ret = 0;
1874
1875         if (num <= 0)
1876                 return -EINVAL;
1877         for (i = 0; i < num; i++) {
1878                 ret = register_kprobe(kps[i]);
1879                 if (ret < 0) {
1880                         if (i > 0)
1881                                 unregister_kprobes(kps, i);
1882                         break;
1883                 }
1884         }
1885         return ret;
1886 }
1887 EXPORT_SYMBOL_GPL(register_kprobes);
1888
1889 void unregister_kprobe(struct kprobe *p)
1890 {
1891         unregister_kprobes(&p, 1);
1892 }
1893 EXPORT_SYMBOL_GPL(unregister_kprobe);
1894
1895 void unregister_kprobes(struct kprobe **kps, int num)
1896 {
1897         int i;
1898
1899         if (num <= 0)
1900                 return;
1901         mutex_lock(&kprobe_mutex);
1902         for (i = 0; i < num; i++)
1903                 if (__unregister_kprobe_top(kps[i]) < 0)
1904                         kps[i]->addr = NULL;
1905         mutex_unlock(&kprobe_mutex);
1906
1907         synchronize_rcu();
1908         for (i = 0; i < num; i++)
1909                 if (kps[i]->addr)
1910                         __unregister_kprobe_bottom(kps[i]);
1911 }
1912 EXPORT_SYMBOL_GPL(unregister_kprobes);
1913
1914 int __weak kprobe_exceptions_notify(struct notifier_block *self,
1915                                         unsigned long val, void *data)
1916 {
1917         return NOTIFY_DONE;
1918 }
1919 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1920
1921 static struct notifier_block kprobe_exceptions_nb = {
1922         .notifier_call = kprobe_exceptions_notify,
1923         .priority = 0x7fffffff /* we need to be notified first */
1924 };
1925
1926 #ifdef CONFIG_KRETPROBES
1927
1928 /* This assumes the 'tsk' is the current task or a task that is not running. */
1929 static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
1930                                                   struct llist_node **cur)
1931 {
1932         struct kretprobe_instance *ri = NULL;
1933         struct llist_node *node = *cur;
1934
1935         if (!node)
1936                 node = tsk->kretprobe_instances.first;
1937         else
1938                 node = node->next;
1939
1940         while (node) {
1941                 ri = container_of(node, struct kretprobe_instance, llist);
1942                 if (ri->ret_addr != kretprobe_trampoline_addr()) {
1943                         *cur = node;
1944                         return ri->ret_addr;
1945                 }
1946                 node = node->next;
1947         }
1948         return NULL;
1949 }
1950 NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
1951
1952 /**
1953  * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe
1954  * @tsk: Target task
1955  * @fp: A frame pointer
1956  * @cur: storage for the loop cursor llist_node pointer for the next call
1957  *
1958  * Find the correct return address modified by a kretprobe on @tsk and return
1959  * it as an unsigned long. If the return address is found, this returns that
1960  * address value; otherwise it returns 0.
1961  * The @tsk must be 'current' or a task which is not running. @fp is a hint
1962  * to find the correct return address - it is compared with the
1963  * kretprobe_instance::fp field. The @cur is a loop cursor for searching the
1964  * kretprobe return addresses on the @tsk. '*@cur' should be NULL at the
1965  * first call, but '@cur' itself must NOT be NULL.
1966  */
1967 unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
1968                                       struct llist_node **cur)
1969 {
1970         struct kretprobe_instance *ri = NULL;
1971         kprobe_opcode_t *ret;
1972
1973         if (WARN_ON_ONCE(!cur))
1974                 return 0;
1975
1976         do {
1977                 ret = __kretprobe_find_ret_addr(tsk, cur);
1978                 if (!ret)
1979                         break;
1980                 ri = container_of(*cur, struct kretprobe_instance, llist);
1981         } while (ri->fp != fp);
1982
1983         return (unsigned long)ret;
1984 }
1985 NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
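
/*
 * Illustrative sketch (guarded out): how a stack dumper could use the cursor
 * API above to replace the kretprobe trampoline address with the real return
 * address. The caller, the captured 'stack_entries' array and the single
 * frame-pointer hint are simplifications for illustration.
 */
#if 0
static void example_print_real_stack(struct task_struct *tsk, void *fp,
                                     unsigned long *stack_entries, int nr)
{
        struct llist_node *cur = NULL;  /* cursor, must start out as NULL */
        unsigned long addr;
        int i;

        for (i = 0; i < nr; i++) {
                addr = stack_entries[i];
                if (addr == (unsigned long)kretprobe_trampoline_addr())
                        addr = kretprobe_find_ret_addr(tsk, fp, &cur);
                pr_info("  %pS\n", (void *)addr);
        }
}
#endif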
1986
1987 void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
1988                                         kprobe_opcode_t *correct_ret_addr)
1989 {
1990         /*
1991          * Do nothing by default. Please fill this to update the fake return
1992          * address on the stack with the correct one on each arch if possible.
1993          */
1994 }
1995
1996 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
1997                                              void *frame_pointer)
1998 {
1999         kprobe_opcode_t *correct_ret_addr = NULL;
2000         struct kretprobe_instance *ri = NULL;
2001         struct llist_node *first, *node = NULL;
2002         struct kretprobe *rp;
2003
2004         /* Find correct address and all nodes for this frame. */
2005         correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
2006         if (!correct_ret_addr) {
2007                 pr_err("kretprobe: Return address not found, not executing handler. There may be a bug in the kernel.\n");
2008                 BUG_ON(1);
2009         }
2010
2011         /*
2012          * Set the return address as the instruction pointer, because if the
2013          * user handler calls stack_trace_save_regs() with this 'regs',
2014          * the stack trace will start from the instruction pointer.
2015          */
2016         instruction_pointer_set(regs, (unsigned long)correct_ret_addr);
2017
2018         /* Run the user handler of the nodes. */
2019         first = current->kretprobe_instances.first;
2020         while (first) {
2021                 ri = container_of(first, struct kretprobe_instance, llist);
2022
2023                 if (WARN_ON_ONCE(ri->fp != frame_pointer))
2024                         break;
2025
2026                 rp = get_kretprobe(ri);
2027                 if (rp && rp->handler) {
2028                         struct kprobe *prev = kprobe_running();
2029
2030                         __this_cpu_write(current_kprobe, &rp->kp);
2031                         ri->ret_addr = correct_ret_addr;
2032                         rp->handler(ri, regs);
2033                         __this_cpu_write(current_kprobe, prev);
2034                 }
2035                 if (first == node)
2036                         break;
2037
2038                 first = first->next;
2039         }
2040
2041         arch_kretprobe_fixup_return(regs, correct_ret_addr);
2042
2043         /* Unlink all nodes for this frame. */
2044         first = current->kretprobe_instances.first;
2045         current->kretprobe_instances.first = node->next;
2046         node->next = NULL;
2047
2048         /* Recycle free instances. */
2049         while (first) {
2050                 ri = container_of(first, struct kretprobe_instance, llist);
2051                 first = first->next;
2052
2053                 recycle_rp_inst(ri);
2054         }
2055
2056         return (unsigned long)correct_ret_addr;
2057 }
2058 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
2059
2060 /*
2061  * This kprobe pre_handler is registered with every kretprobe. When the probe
2062  * hits, it will set up the return probe.
2063  */
2064 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2065 {
2066         struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2067         struct kretprobe_instance *ri;
2068         struct freelist_node *fn;
2069
2070         fn = freelist_try_get(&rp->freelist);
2071         if (!fn) {
2072                 rp->nmissed++;
2073                 return 0;
2074         }
2075
2076         ri = container_of(fn, struct kretprobe_instance, freelist);
2077
2078         if (rp->entry_handler && rp->entry_handler(ri, regs)) {
2079                 freelist_add(&ri->freelist, &rp->freelist);
2080                 return 0;
2081         }
2082
2083         arch_prepare_kretprobe(ri, regs);
2084
2085         __llist_add(&ri->llist, &current->kretprobe_instances);
2086
2087         return 0;
2088 }
2089 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2090
2091 /**
2092  * kprobe_on_func_entry() -- check whether given address is function entry
2093  * @addr: Target address
2094  * @sym:  Target symbol name
2095  * @offset: The offset from the symbol or the address
2096  *
2097  * This checks whether the given @addr+@offset or @sym+@offset is on the
2098  * function entry address or not.
2099  * This returns 0 if it is the function entry, or -EINVAL if it is not.
2100  * It also returns -ENOENT if the symbol or address lookup fails.
2101  * The caller must pass either @addr or @sym (the other must be NULL), or
2102  * this returns -EINVAL.
2103  */
2104 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
2105 {
2106         bool on_func_entry;
2107         kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry);
2108
2109         if (IS_ERR(kp_addr))
2110                 return PTR_ERR(kp_addr);
2111
2112         if (!on_func_entry)
2113                 return -EINVAL;
2114
2115         return 0;
2116 }
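
/*
 * Illustrative sketch (guarded out): validating that a requested location is
 * a function entry before setting up an entry-only probe. The symbol and
 * offset are example values.
 */
#if 0
static int example_check_entry(void)
{
        int err = kprobe_on_func_entry(NULL, "vfs_read", 0x4);

        if (err == -EINVAL)
                pr_info("vfs_read+0x4 is not the function entry\n");
        else if (err == -ENOENT)
                pr_info("failed to look up vfs_read\n");
        return err;
}
#endif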
2117
2118 int register_kretprobe(struct kretprobe *rp)
2119 {
2120         int ret;
2121         struct kretprobe_instance *inst;
2122         int i;
2123         void *addr;
2124
2125         ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
2126         if (ret)
2127                 return ret;
2128
2129         /* If only 'rp->kp.addr' is specified, check for re-registration. */
2130         if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
2131                 return -EINVAL;
2132
2133         if (kretprobe_blacklist_size) {
2134                 addr = kprobe_addr(&rp->kp);
2135                 if (IS_ERR(addr))
2136                         return PTR_ERR(addr);
2137
2138                 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2139                         if (kretprobe_blacklist[i].addr == addr)
2140                                 return -EINVAL;
2141                 }
2142         }
2143
2144         if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
2145                 return -E2BIG;
2146
2147         rp->kp.pre_handler = pre_handler_kretprobe;
2148         rp->kp.post_handler = NULL;
2149
2150         /* Pre-allocate memory for max kretprobe instances */
2151         if (rp->maxactive <= 0) {
2152 #ifdef CONFIG_PREEMPTION
2153                 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
2154 #else
2155                 rp->maxactive = num_possible_cpus();
2156 #endif
2157         }
2158         rp->freelist.head = NULL;
2159         rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
2160         if (!rp->rph)
2161                 return -ENOMEM;
2162
2163         rp->rph->rp = rp;
2164         for (i = 0; i < rp->maxactive; i++) {
2165                 inst = kzalloc(sizeof(struct kretprobe_instance) +
2166                                rp->data_size, GFP_KERNEL);
2167                 if (inst == NULL) {
2168                         refcount_set(&rp->rph->ref, i);
2169                         free_rp_inst(rp);
2170                         return -ENOMEM;
2171                 }
2172                 inst->rph = rp->rph;
2173                 freelist_add(&inst->freelist, &rp->freelist);
2174         }
2175         refcount_set(&rp->rph->ref, i);
2176
2177         rp->nmissed = 0;
2178         /* Establish function entry probe point */
2179         ret = register_kprobe(&rp->kp);
2180         if (ret != 0)
2181                 free_rp_inst(rp);
2182         return ret;
2183 }
2184 EXPORT_SYMBOL_GPL(register_kretprobe);
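
/*
 * Illustrative sketch (guarded out, in the spirit of
 * samples/kprobes/kretprobe_example.c): a minimal kretprobe reporting the
 * return value of an example function.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_ret_handler(struct kretprobe_instance *ri,
                               struct pt_regs *regs)
{
        pr_info("probed function returned %lu\n", regs_return_value(regs));
        return 0;
}

static struct kretprobe example_krp = {
        .handler        = example_ret_handler,
        .maxactive      = 20,                   /* concurrent instances */
        .kp.symbol_name = "kernel_clone",       /* example target */
};

static int __init example_kretprobe_init(void)
{
        return register_kretprobe(&example_krp);
}

static void __exit example_kretprobe_exit(void)
{
        unregister_kretprobe(&example_krp);
}

module_init(example_kretprobe_init);
module_exit(example_kretprobe_exit);
MODULE_LICENSE("GPL");
#endif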
2185
2186 int register_kretprobes(struct kretprobe **rps, int num)
2187 {
2188         int ret = 0, i;
2189
2190         if (num <= 0)
2191                 return -EINVAL;
2192         for (i = 0; i < num; i++) {
2193                 ret = register_kretprobe(rps[i]);
2194                 if (ret < 0) {
2195                         if (i > 0)
2196                                 unregister_kretprobes(rps, i);
2197                         break;
2198                 }
2199         }
2200         return ret;
2201 }
2202 EXPORT_SYMBOL_GPL(register_kretprobes);
2203
2204 void unregister_kretprobe(struct kretprobe *rp)
2205 {
2206         unregister_kretprobes(&rp, 1);
2207 }
2208 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2209
2210 void unregister_kretprobes(struct kretprobe **rps, int num)
2211 {
2212         int i;
2213
2214         if (num <= 0)
2215                 return;
2216         mutex_lock(&kprobe_mutex);
2217         for (i = 0; i < num; i++) {
2218                 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2219                         rps[i]->kp.addr = NULL;
2220                 rps[i]->rph->rp = NULL;
2221         }
2222         mutex_unlock(&kprobe_mutex);
2223
2224         synchronize_rcu();
2225         for (i = 0; i < num; i++) {
2226                 if (rps[i]->kp.addr) {
2227                         __unregister_kprobe_bottom(&rps[i]->kp);
2228                         free_rp_inst(rps[i]);
2229                 }
2230         }
2231 }
2232 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2233
2234 #else /* CONFIG_KRETPROBES */
2235 int register_kretprobe(struct kretprobe *rp)
2236 {
2237         return -EOPNOTSUPP;
2238 }
2239 EXPORT_SYMBOL_GPL(register_kretprobe);
2240
2241 int register_kretprobes(struct kretprobe **rps, int num)
2242 {
2243         return -EOPNOTSUPP;
2244 }
2245 EXPORT_SYMBOL_GPL(register_kretprobes);
2246
2247 void unregister_kretprobe(struct kretprobe *rp)
2248 {
2249 }
2250 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2251
2252 void unregister_kretprobes(struct kretprobe **rps, int num)
2253 {
2254 }
2255 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2256
2257 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2258 {
2259         return 0;
2260 }
2261 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2262
2263 #endif /* CONFIG_KRETPROBES */
2264
2265 /* Set the kprobe gone and remove its instruction buffer. */
2266 static void kill_kprobe(struct kprobe *p)
2267 {
2268         struct kprobe *kp;
2269
2270         lockdep_assert_held(&kprobe_mutex);
2271
2272         p->flags |= KPROBE_FLAG_GONE;
2273         if (kprobe_aggrprobe(p)) {
2274                 /*
2275                  * If this is an aggr_kprobe, we have to list all the
2276                  * chained probes and mark them GONE.
2277                  */
2278                 list_for_each_entry(kp, &p->list, list)
2279                         kp->flags |= KPROBE_FLAG_GONE;
2280                 p->post_handler = NULL;
2281                 kill_optimized_kprobe(p);
2282         }
2283         /*
2284          * Here, we can remove insn_slot safely, because no thread calls
2285          * the original probed function (which will be freed soon) any more.
2286          */
2287         arch_remove_kprobe(p);
2288
2289         /*
2290          * The module is going away. We should disarm the kprobe which
2291          * is using ftrace, because the ftrace framework is still available at
2292          * 'MODULE_STATE_GOING' notification.
2293          */
2294         if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2295                 disarm_kprobe_ftrace(p);
2296 }
2297
2298 /* Disable one kprobe */
2299 int disable_kprobe(struct kprobe *kp)
2300 {
2301         int ret = 0;
2302         struct kprobe *p;
2303
2304         mutex_lock(&kprobe_mutex);
2305
2306         /* Disable this kprobe */
2307         p = __disable_kprobe(kp);
2308         if (IS_ERR(p))
2309                 ret = PTR_ERR(p);
2310
2311         mutex_unlock(&kprobe_mutex);
2312         return ret;
2313 }
2314 EXPORT_SYMBOL_GPL(disable_kprobe);
2315
2316 /* Enable one kprobe */
2317 int enable_kprobe(struct kprobe *kp)
2318 {
2319         int ret = 0;
2320         struct kprobe *p;
2321
2322         mutex_lock(&kprobe_mutex);
2323
2324         /* Check whether specified probe is valid. */
2325         p = __get_valid_kprobe(kp);
2326         if (unlikely(p == NULL)) {
2327                 ret = -EINVAL;
2328                 goto out;
2329         }
2330
2331         if (kprobe_gone(kp)) {
2332                 /* This kprobe has gone, we cannot enable it. */
2333                 ret = -EINVAL;
2334                 goto out;
2335         }
2336
2337         if (p != kp)
2338                 kp->flags &= ~KPROBE_FLAG_DISABLED;
2339
2340         if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2341                 p->flags &= ~KPROBE_FLAG_DISABLED;
2342                 ret = arm_kprobe(p);
2343                 if (ret)
2344                         p->flags |= KPROBE_FLAG_DISABLED;
2345         }
2346 out:
2347         mutex_unlock(&kprobe_mutex);
2348         return ret;
2349 }
2350 EXPORT_SYMBOL_GPL(enable_kprobe);
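
/*
 * Illustrative sketch (guarded out): temporarily muting an already registered
 * probe instead of unregistering it, then re-arming it later.
 */
#if 0
static void example_pause_probe(struct kprobe *kp)
{
        if (disable_kprobe(kp))
                return;

        /* ... run a hot path without probe overhead ... */

        if (enable_kprobe(kp))
                pr_warn("failed to re-enable probe at %pS\n", kp->addr);
}
#endif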
2351
2352 /* Callers must NOT call this on the usual path. This is only for critical cases. */
2353 void dump_kprobe(struct kprobe *kp)
2354 {
2355         pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
2356                kp->symbol_name, kp->offset, kp->addr);
2357 }
2358 NOKPROBE_SYMBOL(dump_kprobe);
2359
2360 int kprobe_add_ksym_blacklist(unsigned long entry)
2361 {
2362         struct kprobe_blacklist_entry *ent;
2363         unsigned long offset = 0, size = 0;
2364
2365         if (!kernel_text_address(entry) ||
2366             !kallsyms_lookup_size_offset(entry, &size, &offset))
2367                 return -EINVAL;
2368
2369         ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2370         if (!ent)
2371                 return -ENOMEM;
2372         ent->start_addr = entry;
2373         ent->end_addr = entry + size;
2374         INIT_LIST_HEAD(&ent->list);
2375         list_add_tail(&ent->list, &kprobe_blacklist);
2376
2377         return (int)size;
2378 }
2379
2380 /* Add all symbols in given area into kprobe blacklist */
2381 int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2382 {
2383         unsigned long entry;
2384         int ret = 0;
2385
2386         for (entry = start; entry < end; entry += ret) {
2387                 ret = kprobe_add_ksym_blacklist(entry);
2388                 if (ret < 0)
2389                         return ret;
2390                 if (ret == 0)   /* In case of alias symbol */
2391                         ret = 1;
2392         }
2393         return 0;
2394 }
2395
2396 /* Remove all symbols in given area from kprobe blacklist */
2397 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2398 {
2399         struct kprobe_blacklist_entry *ent, *n;
2400
2401         list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2402                 if (ent->start_addr < start || ent->start_addr >= end)
2403                         continue;
2404                 list_del(&ent->list);
2405                 kfree(ent);
2406         }
2407 }
2408
2409 static void kprobe_remove_ksym_blacklist(unsigned long entry)
2410 {
2411         kprobe_remove_area_blacklist(entry, entry + 1);
2412 }
2413
2414 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2415                                    char *type, char *sym)
2416 {
2417         return -ERANGE;
2418 }
2419
2420 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2421                        char *sym)
2422 {
2423 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2424         if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2425                 return 0;
2426 #ifdef CONFIG_OPTPROBES
2427         if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2428                 return 0;
2429 #endif
2430 #endif
2431         if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2432                 return 0;
2433         return -ERANGE;
2434 }
2435
2436 int __init __weak arch_populate_kprobe_blacklist(void)
2437 {
2438         return 0;
2439 }
2440
2441 /*
2442  * Look up and populate the kprobe_blacklist.
2443  *
2444  * Unlike the kretprobe blacklist, we'll need to determine
2445  * the range of addresses that belong to the said functions,
2446  * since a kprobe need not necessarily be at the beginning
2447  * of a function.
2448  */
2449 static int __init populate_kprobe_blacklist(unsigned long *start,
2450                                              unsigned long *end)
2451 {
2452         unsigned long entry;
2453         unsigned long *iter;
2454         int ret;
2455
2456         for (iter = start; iter < end; iter++) {
2457                 entry = (unsigned long)dereference_symbol_descriptor((void *)*iter);
2458                 ret = kprobe_add_ksym_blacklist(entry);
2459                 if (ret == -EINVAL)
2460                         continue;
2461                 if (ret < 0)
2462                         return ret;
2463         }
2464
2465         /* Symbols in '__kprobes_text' are blacklisted */
2466         ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2467                                         (unsigned long)__kprobes_text_end);
2468         if (ret)
2469                 return ret;
2470
2471         /* Symbols in 'noinstr' section are blacklisted */
2472         ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2473                                         (unsigned long)__noinstr_text_end);
2474
2475         return ret ? : arch_populate_kprobe_blacklist();
2476 }
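
/*
 * Illustrative note (guarded out): the '__start_kprobe_blacklist' section
 * walked above is populated by NOKPROBE_SYMBOL() annotations like the ones
 * used throughout this file. A function that must never be probed can opt
 * out as follows; the helper name is hypothetical.
 */
#if 0
static void example_fragile_helper(void)
{
        /* code that must not be interrupted by a kprobe breakpoint */
}
NOKPROBE_SYMBOL(example_fragile_helper);
#endif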
2477
2478 static void add_module_kprobe_blacklist(struct module *mod)
2479 {
2480         unsigned long start, end;
2481         int i;
2482
2483         if (mod->kprobe_blacklist) {
2484                 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2485                         kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2486         }
2487
2488         start = (unsigned long)mod->kprobes_text_start;
2489         if (start) {
2490                 end = start + mod->kprobes_text_size;
2491                 kprobe_add_area_blacklist(start, end);
2492         }
2493
2494         start = (unsigned long)mod->noinstr_text_start;
2495         if (start) {
2496                 end = start + mod->noinstr_text_size;
2497                 kprobe_add_area_blacklist(start, end);
2498         }
2499 }
2500
2501 static void remove_module_kprobe_blacklist(struct module *mod)
2502 {
2503         unsigned long start, end;
2504         int i;
2505
2506         if (mod->kprobe_blacklist) {
2507                 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2508                         kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2509         }
2510
2511         start = (unsigned long)mod->kprobes_text_start;
2512         if (start) {
2513                 end = start + mod->kprobes_text_size;
2514                 kprobe_remove_area_blacklist(start, end);
2515         }
2516
2517         start = (unsigned long)mod->noinstr_text_start;
2518         if (start) {
2519                 end = start + mod->noinstr_text_size;
2520                 kprobe_remove_area_blacklist(start, end);
2521         }
2522 }
2523
2524 /* Module notifier callback, checking kprobes on the module */
2525 static int kprobes_module_callback(struct notifier_block *nb,
2526                                    unsigned long val, void *data)
2527 {
2528         struct module *mod = data;
2529         struct hlist_head *head;
2530         struct kprobe *p;
2531         unsigned int i;
2532         int checkcore = (val == MODULE_STATE_GOING);
2533
2534         if (val == MODULE_STATE_COMING) {
2535                 mutex_lock(&kprobe_mutex);
2536                 add_module_kprobe_blacklist(mod);
2537                 mutex_unlock(&kprobe_mutex);
2538         }
2539         if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2540                 return NOTIFY_DONE;
2541
2542         /*
2543          * When 'MODULE_STATE_GOING' is notified, both the module's '.text' and
2544          * '.init.text' sections will be freed. When 'MODULE_STATE_LIVE' is
2545          * notified, only the '.init.text' section will be freed. We need to
2546          * disable kprobes which have been inserted into those sections.
2547          */
2548         mutex_lock(&kprobe_mutex);
2549         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2550                 head = &kprobe_table[i];
2551                 hlist_for_each_entry(p, head, hlist)
2552                         if (within_module_init((unsigned long)p->addr, mod) ||
2553                             (checkcore &&
2554                              within_module_core((unsigned long)p->addr, mod))) {
2555                                 /*
2556                                  * The vaddr this probe is installed at will soon
2557                                  * be vfreed but not synced to disk. Hence,
2558                                  * disarming the breakpoint isn't needed.
2559                                  *
2560                                  * Note, this will also move any optimized probes
2561                                  * that are pending to be removed from their
2562                                  * corresponding lists to the 'freeing_list' and
2563                                  * will not be touched by the delayed
2564                                  * kprobe_optimizer() work handler.
2565                                  */
2566                                 kill_kprobe(p);
2567                         }
2568         }
2569         if (val == MODULE_STATE_GOING)
2570                 remove_module_kprobe_blacklist(mod);
2571         mutex_unlock(&kprobe_mutex);
2572         return NOTIFY_DONE;
2573 }
2574
2575 static struct notifier_block kprobe_module_nb = {
2576         .notifier_call = kprobes_module_callback,
2577         .priority = 0
2578 };
2579
2580 void kprobe_free_init_mem(void)
2581 {
2582         void *start = (void *)(&__init_begin);
2583         void *end = (void *)(&__init_end);
2584         struct hlist_head *head;
2585         struct kprobe *p;
2586         int i;
2587
2588         mutex_lock(&kprobe_mutex);
2589
2590         /* Kill all kprobes on initmem because the target code has been freed. */
2591         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2592                 head = &kprobe_table[i];
2593                 hlist_for_each_entry(p, head, hlist) {
2594                         if (start <= (void *)p->addr && (void *)p->addr < end)
2595                                 kill_kprobe(p);
2596                 }
2597         }
2598
2599         mutex_unlock(&kprobe_mutex);
2600 }
2601
2602 static int __init init_kprobes(void)
2603 {
2604         int i, err = 0;
2605
2606         /* FIXME allocate the probe table, currently defined statically */
2607         /* initialize all list heads */
2608         for (i = 0; i < KPROBE_TABLE_SIZE; i++)
2609                 INIT_HLIST_HEAD(&kprobe_table[i]);
2610
2611         err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2612                                         __stop_kprobe_blacklist);
2613         if (err)
2614                 pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err);
2615
2616         if (kretprobe_blacklist_size) {
2617                 /* look up the function address from its name */
2618                 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2619                         kretprobe_blacklist[i].addr =
2620                                 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2621                         if (!kretprobe_blacklist[i].addr)
2622                                 pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
2623                                        kretprobe_blacklist[i].name);
2624                 }
2625         }
2626
2627         /* By default, kprobes are armed */
2628         kprobes_all_disarmed = false;
2629
2630 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2631         /* Init 'kprobe_optinsn_slots' for allocation */
2632         kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2633 #endif
2634
2635         err = arch_init_kprobes();
2636         if (!err)
2637                 err = register_die_notifier(&kprobe_exceptions_nb);
2638         if (!err)
2639                 err = register_module_notifier(&kprobe_module_nb);
2640
2641         kprobes_initialized = (err == 0);
2642         kprobe_sysctls_init();
2643         return err;
2644 }
2645 early_initcall(init_kprobes);
2646
2647 #if defined(CONFIG_OPTPROBES)
2648 static int __init init_optprobes(void)
2649 {
2650         /*
2651          * Enable kprobe optimization - this kicks the optimizer, which
2652          * depends on synchronize_rcu_tasks() and ksoftirqd, which is
2653          * not spawned in early initcall. So delay the optimization.
2654          */
2655         optimize_all_kprobes();
2656
2657         return 0;
2658 }
2659 subsys_initcall(init_optprobes);
2660 #endif
2661
2662 #ifdef CONFIG_DEBUG_FS
2663 static void report_probe(struct seq_file *pi, struct kprobe *p,
2664                 const char *sym, int offset, char *modname, struct kprobe *pp)
2665 {
2666         char *kprobe_type;
2667         void *addr = p->addr;
2668
2669         if (p->pre_handler == pre_handler_kretprobe)
2670                 kprobe_type = "r";
2671         else
2672                 kprobe_type = "k";
2673
2674         if (!kallsyms_show_value(pi->file->f_cred))
2675                 addr = NULL;
2676
2677         if (sym)
2678                 seq_printf(pi, "%px  %s  %s+0x%x  %s ",
2679                         addr, kprobe_type, sym, offset,
2680                         (modname ? modname : " "));
2681         else    /* try to use %pS */
2682                 seq_printf(pi, "%px  %s  %pS ",
2683                         addr, kprobe_type, p->addr);
2684
2685         if (!pp)
2686                 pp = p;
2687         seq_printf(pi, "%s%s%s%s\n",
2688                 (kprobe_gone(p) ? "[GONE]" : ""),
2689                 ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2690                 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2691                 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2692 }
2693
2694 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2695 {
2696         return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2697 }
2698
2699 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2700 {
2701         (*pos)++;
2702         if (*pos >= KPROBE_TABLE_SIZE)
2703                 return NULL;
2704         return pos;
2705 }
2706
2707 static void kprobe_seq_stop(struct seq_file *f, void *v)
2708 {
2709         /* Nothing to do */
2710 }
2711
2712 static int show_kprobe_addr(struct seq_file *pi, void *v)
2713 {
2714         struct hlist_head *head;
2715         struct kprobe *p, *kp;
2716         const char *sym = NULL;
2717         unsigned int i = *(loff_t *) v;
2718         unsigned long offset = 0;
2719         char *modname, namebuf[KSYM_NAME_LEN];
2720
2721         head = &kprobe_table[i];
2722         preempt_disable();
2723         hlist_for_each_entry_rcu(p, head, hlist) {
2724                 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2725                                         &offset, &modname, namebuf);
2726                 if (kprobe_aggrprobe(p)) {
2727                         list_for_each_entry_rcu(kp, &p->list, list)
2728                                 report_probe(pi, kp, sym, offset, modname, p);
2729                 } else
2730                         report_probe(pi, p, sym, offset, modname, NULL);
2731         }
2732         preempt_enable();
2733         return 0;
2734 }
2735
2736 static const struct seq_operations kprobes_sops = {
2737         .start = kprobe_seq_start,
2738         .next  = kprobe_seq_next,
2739         .stop  = kprobe_seq_stop,
2740         .show  = show_kprobe_addr
2741 };
2742
2743 DEFINE_SEQ_ATTRIBUTE(kprobes);
2744
2745 /* kprobes/blacklist -- shows which functions cannot be probed */
2746 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2747 {
2748         mutex_lock(&kprobe_mutex);
2749         return seq_list_start(&kprobe_blacklist, *pos);
2750 }
2751
2752 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2753 {
2754         return seq_list_next(v, &kprobe_blacklist, pos);
2755 }
2756
2757 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2758 {
2759         struct kprobe_blacklist_entry *ent =
2760                 list_entry(v, struct kprobe_blacklist_entry, list);
2761
2762         /*
2763          * If '/proc/kallsyms' is not showing kernel addresses, we won't
2764          * show them here either.
2765          */
2766         if (!kallsyms_show_value(m->file->f_cred))
2767                 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2768                            (void *)ent->start_addr);
2769         else
2770                 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2771                            (void *)ent->end_addr, (void *)ent->start_addr);
2772         return 0;
2773 }
2774
2775 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2776 {
2777         mutex_unlock(&kprobe_mutex);
2778 }
2779
2780 static const struct seq_operations kprobe_blacklist_sops = {
2781         .start = kprobe_blacklist_seq_start,
2782         .next  = kprobe_blacklist_seq_next,
2783         .stop  = kprobe_blacklist_seq_stop,
2784         .show  = kprobe_blacklist_seq_show,
2785 };
2786 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
2787
2788 static int arm_all_kprobes(void)
2789 {
2790         struct hlist_head *head;
2791         struct kprobe *p;
2792         unsigned int i, total = 0, errors = 0;
2793         int err, ret = 0;
2794
2795         mutex_lock(&kprobe_mutex);
2796
2797         /* If kprobes are armed, just return */
2798         if (!kprobes_all_disarmed)
2799                 goto already_enabled;
2800
2801         /*
2802          * optimize_kprobe() called by arm_kprobe() checks
2803          * kprobes_all_disarmed, so set kprobes_all_disarmed before
2804          * calling arm_kprobe().
2805          */
2806         kprobes_all_disarmed = false;
2807         /* Arming kprobes doesn't optimize kprobe itself */
2808         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2809                 head = &kprobe_table[i];
2810                 /* Arm all kprobes on a best-effort basis */
2811                 hlist_for_each_entry(p, head, hlist) {
2812                         if (!kprobe_disabled(p)) {
2813                                 err = arm_kprobe(p);
2814                                 if (err)  {
2815                                         errors++;
2816                                         ret = err;
2817                                 }
2818                                 total++;
2819                         }
2820                 }
2821         }
2822
2823         if (errors)
2824                 pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
2825                         errors, total);
2826         else
2827                 pr_info("Kprobes globally enabled\n");
2828
2829 already_enabled:
2830         mutex_unlock(&kprobe_mutex);
2831         return ret;
2832 }
2833
2834 static int disarm_all_kprobes(void)
2835 {
2836         struct hlist_head *head;
2837         struct kprobe *p;
2838         unsigned int i, total = 0, errors = 0;
2839         int err, ret = 0;
2840
2841         mutex_lock(&kprobe_mutex);
2842
2843         /* If kprobes are already disarmed, just return */
2844         if (kprobes_all_disarmed) {
2845                 mutex_unlock(&kprobe_mutex);
2846                 return 0;
2847         }
2848
2849         kprobes_all_disarmed = true;
2850
2851         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2852                 head = &kprobe_table[i];
2853                 /* Disarm all kprobes on a best-effort basis */
2854                 hlist_for_each_entry(p, head, hlist) {
2855                         if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2856                                 err = disarm_kprobe(p, false);
2857                                 if (err) {
2858                                         errors++;
2859                                         ret = err;
2860                                 }
2861                                 total++;
2862                         }
2863                 }
2864         }
2865
2866         if (errors)
2867                 pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
2868                         errors, total);
2869         else
2870                 pr_info("Kprobes globally disabled\n");
2871
2872         mutex_unlock(&kprobe_mutex);
2873
2874         /* Wait for the optimizer to finish disarming all kprobes */
2875         wait_for_kprobe_optimizer();
2876
2877         return ret;
2878 }
2879
2880 /*
2881  * XXX: The debugfs bool file interface doesn't allow for callbacks
2882  * when the bool state is switched. We can reuse that facility when it
2883  * becomes available.
2884  */
2885 static ssize_t read_enabled_file_bool(struct file *file,
2886                char __user *user_buf, size_t count, loff_t *ppos)
2887 {
2888         char buf[3];
2889
2890         if (!kprobes_all_disarmed)
2891                 buf[0] = '1';
2892         else
2893                 buf[0] = '0';
2894         buf[1] = '\n';
2895         buf[2] = 0x00;
2896         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2897 }
2898
2899 static ssize_t write_enabled_file_bool(struct file *file,
2900                const char __user *user_buf, size_t count, loff_t *ppos)
2901 {
2902         bool enable;
2903         int ret;
2904
2905         ret = kstrtobool_from_user(user_buf, count, &enable);
2906         if (ret)
2907                 return ret;
2908
2909         ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
2910         if (ret)
2911                 return ret;
2912
2913         return count;
2914 }
2915
2916 static const struct file_operations fops_kp = {
2917         .read =         read_enabled_file_bool,
2918         .write =        write_enabled_file_bool,
2919         .llseek =       default_llseek,
2920 };
2921
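/*
 * Usage note: with debugfs mounted at the conventional location, the files
 * created below are typically used like this (paths may differ per system):
 *
 *   cat /sys/kernel/debug/kprobes/list          # dump registered probes
 *   cat /sys/kernel/debug/kprobes/blacklist     # dump non-probeable ranges
 *   echo 0 > /sys/kernel/debug/kprobes/enabled  # disarm all kprobes
 *   echo 1 > /sys/kernel/debug/kprobes/enabled  # re-arm them
 */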
2922 static int __init debugfs_kprobe_init(void)
2923 {
2924         struct dentry *dir;
2925
2926         dir = debugfs_create_dir("kprobes", NULL);
2927
2928         debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
2929
2930         debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
2931
2932         debugfs_create_file("blacklist", 0400, dir, NULL,
2933                             &kprobe_blacklist_fops);
2934
2935         return 0;
2936 }
2937
2938 late_initcall(debugfs_kprobe_init);
2939 #endif /* CONFIG_DEBUG_FS */