diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e995541..8a12a25 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -36,6 +36,7 @@
 #include <linux/cpu.h>
 #include <linux/jump_label.h>
 #include <linux/perf_event.h>
+#include <linux/static_call.h>
 
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -1223,8 +1224,7 @@ void kprobes_inc_nmissed_count(struct kprobe *p)
 }
 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
 
-void recycle_rp_inst(struct kretprobe_instance *ri,
-                    struct hlist_head *head)
+static void recycle_rp_inst(struct kretprobe_instance *ri)
 {
        struct kretprobe *rp = ri->rp;
 
@@ -1236,12 +1236,11 @@ void recycle_rp_inst(struct kretprobe_instance *ri,
                hlist_add_head(&ri->hlist, &rp->free_instances);
                raw_spin_unlock(&rp->lock);
        } else
-               /* Unregistering */
-               hlist_add_head(&ri->hlist, head);
+               kfree_rcu(ri, rcu);
 }
 NOKPROBE_SYMBOL(recycle_rp_inst);
 
-void kretprobe_hash_lock(struct task_struct *tsk,
+static void kretprobe_hash_lock(struct task_struct *tsk,
                         struct hlist_head **head, unsigned long *flags)
 __acquires(hlist_lock)
 {
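
Making recycle_rp_inst() free unreferenced instances itself via kfree_rcu()
removes the hlist_head out-parameter: callers no longer stage instances on a
private list and free them after dropping the lock, because the RCU grace
period defers the actual kfree() past any critical section. (struct
kretprobe_instance gains an rcu_head for this in the same series; that header
change is not shown in this diff.) A minimal sketch of the pattern, using a
hypothetical struct rather than anything from this file:

    #include <linux/slab.h>
    #include <linux/rcupdate.h>

    struct foo {
            int payload;
            struct rcu_head rcu;    /* storage used by the deferred free */
    };

    static void drop_foo(struct foo *f)
    {
            /*
             * kfree_rcu(ptr, field) queues the object for kfree() after a
             * full RCU grace period; no callback function and no "free
             * later" list are needed, which is exactly why
             * recycle_rp_inst() above can lose its hlist_head parameter.
             */
            kfree_rcu(f, rcu);
    }
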
@@ -1263,7 +1262,7 @@ __acquires(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_lock);
 
-void kretprobe_hash_unlock(struct task_struct *tsk,
+static void kretprobe_hash_unlock(struct task_struct *tsk,
                           unsigned long *flags)
 __releases(hlist_lock)
 {
@@ -1284,7 +1283,7 @@ __releases(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_unlock);
 
-struct kprobe kprobe_busy = {
+static struct kprobe kprobe_busy = {
        .addr = (void *) get_kprobe,
 };
 
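With trampoline handling about to be consolidated into this file (the new
__kretprobe_trampoline_handler() below), kretprobe_hash_lock(),
kretprobe_hash_unlock() and the kprobe_busy dummy probe lose their last
out-of-file users, so all three become static.
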
@@ -1313,7 +1312,7 @@ void kprobe_busy_end(void)
 void kprobe_flush_task(struct task_struct *tk)
 {
        struct kretprobe_instance *ri;
-       struct hlist_head *head, empty_rp;
+       struct hlist_head *head;
        struct hlist_node *tmp;
        unsigned long hash, flags = 0;
 
@@ -1323,19 +1322,14 @@ void kprobe_flush_task(struct task_struct *tk)
 
        kprobe_busy_begin();
 
-       INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
        kretprobe_table_lock(hash, &flags);
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task == tk)
-                       recycle_rp_inst(ri, &empty_rp);
+                       recycle_rp_inst(ri);
        }
        kretprobe_table_unlock(hash, &flags);
-       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
-               hlist_del(&ri->hlist);
-               kfree(ri);
-       }
 
        kprobe_busy_end();
 }
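
The simplification in kprobe_flush_task() follows directly: instances
belonging to the exiting task are handed straight to recycle_rp_inst(), and
the empty_rp staging list plus the collect-then-free loop after
kretprobe_table_unlock() disappear, since kfree_rcu() already defers the free
safely past the locked region.
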
@@ -1359,7 +1353,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
        struct hlist_node *next;
        struct hlist_head *head;
 
-       /* No race here */
+       /* To avoid recursive kretprobe by NMI, set kprobe busy here */
+       kprobe_busy_begin();
        for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
                kretprobe_table_lock(hash, &flags);
                head = &kretprobe_inst_table[hash];
@@ -1369,6 +1364,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
                }
                kretprobe_table_unlock(hash, &flags);
        }
+       kprobe_busy_end();
+
        free_rp_inst(rp);
 }
 NOKPROBE_SYMBOL(cleanup_rp_inst);
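
cleanup_rp_inst() replaces the stale "No race here" comment with a real
guard: installing the dummy kprobe_busy probe for the duration of the table
walk means an NMI-delivered kretprobe on this CPU sees a kprobe already
active and backs off instead of recursing into the same hash locks. For
reference, the busy helpers defined earlier in this file (their definitions
are only visible as hunk context above) look roughly like this:

    void kprobe_busy_begin(void)
    {
            struct kprobe_ctlblk *kcb;

            preempt_disable();
            __this_cpu_write(current_kprobe, &kprobe_busy);
            kcb = get_kprobe_ctlblk();
            kcb->kprobe_status = KPROBE_HIT_ACTIVE;
    }

    void kprobe_busy_end(void)
    {
            __this_cpu_write(current_kprobe, NULL);
            preempt_enable();
    }
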
@@ -1634,6 +1631,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
        if (!kernel_text_address((unsigned long) p->addr) ||
            within_kprobe_blacklist((unsigned long) p->addr) ||
            jump_label_text_reserved(p->addr, p->addr) ||
+           static_call_text_reserved(p->addr, p->addr) ||
            find_bug((unsigned long)p->addr)) {
                ret = -EINVAL;
                goto out;
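
static_call_text_reserved() joins the jump_label check on the line above for
the same reason: inline static-call sites are code the kernel rewrites at
runtime, so planting a kprobe breakpoint there would conflict with the
runtime patching. The new linux/static_call.h include in the first hunk
exists to pull in this declaration.
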
@@ -1927,6 +1925,97 @@ unsigned long __weak arch_deref_entry_point(void *entry)
 }
 
 #ifdef CONFIG_KRETPROBES
+
+unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
+                                            void *trampoline_address,
+                                            void *frame_pointer)
+{
+       struct kretprobe_instance *ri = NULL, *last = NULL;
+       struct hlist_head *head;
+       struct hlist_node *tmp;
+       unsigned long flags;
+       kprobe_opcode_t *correct_ret_addr = NULL;
+       bool skipped = false;
+
+       kretprobe_hash_lock(current, &head, &flags);
+
+       /*
+        * It is possible to have multiple instances associated with a given
+        * task either because multiple functions in the call path have
+        * return probes installed on them, and/or more than one
+        * return probe was registered for a target function.
+        *
+        * We can handle this because:
+        *     - instances are always pushed into the head of the list
+        *     - when multiple return probes are registered for the same
+        *       function, the (chronologically) first instance's ret_addr
+        *       will be the real return address, and all the rest will
+        *       point to kretprobe_trampoline.
+        */
+       hlist_for_each_entry(ri, head, hlist) {
+               if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                       continue;
+               /*
+                * Return probes must be pushed on this hash list in the
+                * correct order (same as return order) so that they can
+                * be popped correctly. However, if we find one pushed in
+                * the wrong order, we have hit a function that should not
+                * be probed, because the out-of-order entry was pushed
+                * while another kretprobe was itself being processed.
+                */
+               if (ri->fp != frame_pointer) {
+                       if (!skipped)
+                               pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
+                       skipped = true;
+                       continue;
+               }
+
+               correct_ret_addr = ri->ret_addr;
+               if (skipped)
+                       pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
+                               ri->rp->kp.addr);
+
+               if (correct_ret_addr != trampoline_address)
+                       /*
+                        * This is the real return address. Any other
+                        * instances associated with this task are for
+                        * other calls deeper on the call stack
+                        */
+                       break;
+       }
+
+       BUG_ON(!correct_ret_addr || (correct_ret_addr == trampoline_address));
+       last = ri;
+
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+               if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                       continue;
+               if (ri->fp != frame_pointer)
+                       continue;
+
+               if (ri->rp && ri->rp->handler) {
+                       struct kprobe *prev = kprobe_running();
+
+                       __this_cpu_write(current_kprobe, &ri->rp->kp);
+                       ri->ret_addr = correct_ret_addr;
+                       ri->rp->handler(ri, regs);
+                       __this_cpu_write(current_kprobe, prev);
+               }
+
+               recycle_rp_inst(ri);
+
+               if (ri == last)
+                       break;
+       }
+
+       kretprobe_hash_unlock(current, &flags);
+
+       return (unsigned long)correct_ret_addr;
+}
+NOKPROBE_SYMBOL(__kretprobe_trampoline_handler);
+
 /*
  * This kprobe pre_handler is registered with every kretprobe. When probe
  * hits it will set up the return probe.
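
__kretprobe_trampoline_handler() lifts into generic code the
instance-matching, handler-dispatch and recycling loop that every
architecture previously duplicated in its private trampoline handler. An
arch now only normalizes pt_regs and delegates. A simplified sketch of the
shape of such a caller, modeled loosely on the x86 conversion from the same
series (the register fixups are arch-specific and omitted here, and
kretprobe_trampoline stands for the arch's trampoline symbol):

    __used __visible void *trampoline_handler(struct pt_regs *regs)
    {
            /*
             * Arch code first rewrites @regs so it looks as if the probed
             * function returned normally (omitted), then lets the generic
             * handler run the user callbacks and hand back the real return
             * address. The third argument must match the frame pointer the
             * instances were tagged with in arch_prepare_kretprobe().
             */
            return (void *)__kretprobe_trampoline_handler(regs,
                            &kretprobe_trampoline, &regs->sp);
    }
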
@@ -1937,17 +2026,6 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
        unsigned long hash, flags = 0;
        struct kretprobe_instance *ri;
 
-       /*
-        * To avoid deadlocks, prohibit return probing in NMI contexts,
-        * just skip the probe and increase the (inexact) 'nmissed'
-        * statistical counter, so that the user is informed that
-        * something happened:
-        */
-       if (unlikely(in_nmi())) {
-               rp->nmissed++;
-               return 0;
-       }
-
        /* TODO: consider to only swap the RA after the last pre_handler fired */
        hash = hash_ptr(current, KPROBE_HASH_BITS);
        raw_spin_lock_irqsave(&rp->lock, flags);
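
Dropping the in_nmi() bailout means return probes are no longer silently
skipped (with only nmissed bumped) when the probed function is hit in NMI
context. The rationale in this series is that the deadlock the check guarded
against is now prevented at the source: the remaining holders of the
kretprobe hash locks mark the CPU busy (see cleanup_rp_inst() above), so an
NMI-time kretprobe backs off rather than recursing into those locks.
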
@@ -2536,7 +2614,7 @@ static int __init init_kprobes(void)
                init_test_probes();
        return err;
 }
-subsys_initcall(init_kprobes);
+early_initcall(init_kprobes);
 
 #ifdef CONFIG_DEBUG_FS
 static void report_probe(struct seq_file *pi, struct kprobe *p,
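
Finally, init_kprobes() moves from subsys_initcall() to early_initcall().
Early initcalls run from do_pre_smp_initcalls(), ahead of the numbered
core/postcore/.../subsys levels, so the kprobes infrastructure (and
facilities layered on it, such as boot-time kprobe event tracing) becomes
usable much earlier in boot.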