linux-2.6-microblaze.git: kernel/kprobes.c
index 50cd84f..4a904cc 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
 
 
 static int kprobes_initialized;
+/* The kprobe_table can be accessed by either:
+ * - normal hlist traversal and RCU add/del while kprobe_mutex is held,
+ * or
+ * - RCU hlist traversal with preemption disabled (breakpoint handlers).
+ */
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
@@ -326,7 +331,8 @@ struct kprobe *get_kprobe(void *addr)
        struct kprobe *p;
 
        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-       hlist_for_each_entry_rcu(p, head, hlist) {
+       hlist_for_each_entry_rcu(p, head, hlist,
+                                lockdep_is_held(&kprobe_mutex)) {
                if (p->addr == addr)
                        return p;
        }
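For readers unfamiliar with the pattern above: the fourth argument to hlist_for_each_entry_rcu() is a lockdep expression telling the RCU list-debugging code that the traversal is also legal when that lock is held, not only inside an RCU read-side critical section. Below is a minimal, hypothetical sketch of that dual-protection scheme; table_mutex, table_head, struct item and the helper names are illustrative and not part of this patch.

/*
 * Hypothetical example, not part of this patch: a hash list updated
 * under a mutex and read either under that mutex or under RCU.
 */
#include <linux/mutex.h>
#include <linux/rculist.h>

struct item {
	unsigned long key;
	struct hlist_node hlist;
};

static DEFINE_MUTEX(table_mutex);
static HLIST_HEAD(table_head);

/* Readers may hold table_mutex *or* be inside an RCU read-side section. */
static struct item *table_lookup(unsigned long key)
{
	struct item *it;

	hlist_for_each_entry_rcu(it, &table_head, hlist,
				 lockdep_is_held(&table_mutex))
		if (it->key == key)
			return it;
	return NULL;
}

/* Updaters always hold table_mutex; RCU add keeps lockless readers safe. */
static void table_add(struct item *it)
{
	mutex_lock(&table_mutex);
	hlist_add_head_rcu(&it->hlist, &table_head);
	mutex_unlock(&table_mutex);
}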
@@ -586,11 +592,12 @@ static void kprobe_optimizer(struct work_struct *work)
        mutex_unlock(&module_mutex);
        mutex_unlock(&text_mutex);
        cpus_read_unlock();
-       mutex_unlock(&kprobe_mutex);
 
        /* Step 5: Kick optimizer again if needed */
        if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
                kick_kprobe_optimizer();
+
+       mutex_unlock(&kprobe_mutex);
 }
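The hunk above is a correctness fix as much as a cleanup: the re-check of optimizing_list/unoptimizing_list and the re-kick now happen before kprobe_mutex is dropped, so a request queued concurrently under the mutex cannot slip through between the check and the unlock. A generic, hypothetical sketch of this "re-check before unlock" pattern for a delayed-work consumer follows; example_mutex, example_list and example_work are illustrative names, not from this file.

/*
 * Hypothetical sketch, not from this file: a delayed-work consumer that
 * re-checks its queue while still holding the producers' mutex, so a
 * request added concurrently is never lost between check and unlock.
 */
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(example_mutex);
static LIST_HEAD(example_list);
static void example_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_work, example_worker);

static void example_worker(struct work_struct *work)
{
	mutex_lock(&example_mutex);

	/* ... drain example_list ... */

	/*
	 * Re-arm before unlocking: a producer that queues a new entry
	 * (always under example_mutex) is guaranteed to be seen here.
	 */
	if (!list_empty(&example_list))
		schedule_delayed_work(&example_work, HZ);

	mutex_unlock(&example_mutex);
}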
 
 /* Wait for completing optimization and unoptimization */
@@ -668,8 +675,6 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
        lockdep_assert_cpus_held();
        arch_unoptimize_kprobe(op);
        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
-       if (kprobe_disabled(&op->kp))
-               arch_disarm_kprobe(&op->kp);
 }
 
 /* Unoptimize a kprobe if p is optimized */
@@ -849,7 +854,7 @@ static void optimize_all_kprobes(void)
        kprobes_allow_optimization = true;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, head, hlist)
+               hlist_for_each_entry(p, head, hlist)
                        if (!kprobe_disabled(p))
                                optimize_kprobe(p);
        }
@@ -876,7 +881,7 @@ static void unoptimize_all_kprobes(void)
        kprobes_allow_optimization = false;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, head, hlist) {
+               hlist_for_each_entry(p, head, hlist) {
                        if (!kprobe_disabled(p))
                                unoptimize_kprobe(p, false);
                }
@@ -1236,6 +1241,26 @@ __releases(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_unlock);
 
+struct kprobe kprobe_busy = {
+       .addr = (void *) get_kprobe,
+};
+
+void kprobe_busy_begin(void)
+{
+       struct kprobe_ctlblk *kcb;
+
+       preempt_disable();
+       __this_cpu_write(current_kprobe, &kprobe_busy);
+       kcb = get_kprobe_ctlblk();
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+}
+
+void kprobe_busy_end(void)
+{
+       __this_cpu_write(current_kprobe, NULL);
+       preempt_enable();
+}
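kprobe_busy_begin()/kprobe_busy_end() let code that is not a breakpoint handler mark this CPU as already handling a kprobe, so any probe that fires inside the bracketed region is treated as a reentrant hit and skipped instead of running its handlers on top of the protected code. The real user added by this patch is kprobe_flush_task() below; the following is a purely illustrative sketch of a caller (example_cleanup is a hypothetical function, assuming the declarations are exposed via <linux/kprobes.h>).

/* Hypothetical caller, for illustration only (not part of this patch). */
#include <linux/kprobes.h>

static void example_cleanup(void)
{
	/* Disables preemption and claims the per-CPU current_kprobe slot. */
	kprobe_busy_begin();

	/*
	 * Work done here may call functions that are themselves probed;
	 * any such hit is seen as a reentrant kprobe and skipped rather
	 * than invoking its handlers recursively.
	 */

	/* Releases current_kprobe and re-enables preemption. */
	kprobe_busy_end();
}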
+
 /*
  * This function is called from finish_task_switch when task tk becomes dead,
  * so that we can recycle any function-return probe instances associated
@@ -1253,6 +1278,8 @@ void kprobe_flush_task(struct task_struct *tk)
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;
 
+       kprobe_busy_begin();
+
        INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
@@ -1266,6 +1293,8 @@ void kprobe_flush_task(struct task_struct *tk)
                hlist_del(&ri->hlist);
                kfree(ri);
        }
+
+       kprobe_busy_end();
 }
 NOKPROBE_SYMBOL(kprobe_flush_task);
 
@@ -1499,12 +1528,14 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
 {
        struct kprobe *ap, *list_p;
 
+       lockdep_assert_held(&kprobe_mutex);
+
        ap = get_kprobe(p->addr);
        if (unlikely(!ap))
                return NULL;
 
        if (p != ap) {
-               list_for_each_entry_rcu(list_p, &ap->list, list)
+               list_for_each_entry(list_p, &ap->list, list)
                        if (list_p == p)
                        /* kprobe p is a valid probe */
                                goto valid;
@@ -1669,7 +1700,9 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
 {
        struct kprobe *kp;
 
-       list_for_each_entry_rcu(kp, &ap->list, list)
+       lockdep_assert_held(&kprobe_mutex);
+
+       list_for_each_entry(kp, &ap->list, list)
                if (!kprobe_disabled(kp))
                        /*
                         * There is an active probe on the list.
@@ -1748,7 +1781,7 @@ static int __unregister_kprobe_top(struct kprobe *p)
        else {
                /* If disabling probe has special handlers, update aggrprobe */
                if (p->post_handler && !kprobe_gone(p)) {
-                       list_for_each_entry_rcu(list_p, &ap->list, list) {
+                       list_for_each_entry(list_p, &ap->list, list) {
                                if ((list_p != p) && (list_p->post_handler))
                                        goto noclean;
                        }
@@ -2062,13 +2095,15 @@ static void kill_kprobe(struct kprobe *p)
 {
        struct kprobe *kp;
 
+       lockdep_assert_held(&kprobe_mutex);
+
        p->flags |= KPROBE_FLAG_GONE;
        if (kprobe_aggrprobe(p)) {
                /*
                 * If this is an aggr_kprobe, we have to list all the
                 * chained probes and mark them GONE.
                 */
-               list_for_each_entry_rcu(kp, &p->list, list)
+               list_for_each_entry(kp, &p->list, list)
                        kp->flags |= KPROBE_FLAG_GONE;
                p->post_handler = NULL;
                kill_optimized_kprobe(p);
@@ -2312,7 +2347,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, head, hlist)
+               hlist_for_each_entry(p, head, hlist)
                        if (within_module_init((unsigned long)p->addr, mod) ||
                            (checkcore &&
                             within_module_core((unsigned long)p->addr, mod))) {
@@ -2550,7 +2585,7 @@ static int arm_all_kprobes(void)
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                /* Arm all kprobes on a best-effort basis */
-               hlist_for_each_entry_rcu(p, head, hlist) {
+               hlist_for_each_entry(p, head, hlist) {
                        if (!kprobe_disabled(p)) {
                                err = arm_kprobe(p);
                                if (err)  {
@@ -2593,7 +2628,7 @@ static int disarm_all_kprobes(void)
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                /* Disarm all kprobes on a best-effort basis */
-               hlist_for_each_entry_rcu(p, head, hlist) {
+               hlist_for_each_entry(p, head, hlist) {
                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
                                err = disarm_kprobe(p, false);
                                if (err) {