kprobes: Fix optimize_kprobe()/unoptimize_kprobe() cancellation logic
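
KPROBE_FLAG_OPTIMIZED is made to track the actual state of the detour
code: it is now cleared at the same point arch_unoptimize_kprobe()
removes that code (in do_unoptimize_kprobes() and
force_unoptimize_kprobe()). A new helper, optprobe_queued_unopt(),
lets optimize_kprobe() and unoptimize_kprobe() tell whether a probe
sitting on op->list is queued for optimizing or for unoptimizing,
instead of inferring the direction from list_empty() and the flag
alone.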
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 53534aa..2625c24 100644
@@ -510,6 +510,8 @@ static void do_unoptimize_kprobes(void)
        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop free_list for disarming */
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+               /* Switching from detour code to origin */
+               op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
                /* Disarm probes if marked disabled */
                if (kprobe_disabled(&op->kp))
                        arch_disarm_kprobe(&op->kp);
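
The clearing added above matters because probes on freeing_list have
just had their detour code removed by arch_unoptimize_kprobes(), and
kprobe_optimized() is nothing more than a test of this bit. For
reference, a sketch of that helper as defined elsewhere in
kernel/kprobes.c (an assumption about the surrounding tree, not part
of this diff):

        /* Return true(!0) if the kprobe is optimized */
        static inline int kprobe_optimized(struct kprobe *p)
        {
                return p->flags & KPROBE_FLAG_OPTIMIZED;
        }

Leaving the bit set on a probe whose detour code is already gone is
exactly the stale state the hunks below stop guessing around.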
@@ -610,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
        mutex_unlock(&kprobe_mutex);
 }
 
+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+       struct optimized_kprobe *_op;
+
+       list_for_each_entry(_op, &unoptimizing_list, list) {
+               if (op == _op)
+                       return true;
+       }
+
+       return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
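
The helper above exists because op->list links a probe into whichever
of optimizing_list, unoptimizing_list or freeing_list it is queued on,
so !list_empty(&op->list) only says "queued somewhere", not in which
direction. A minimal usage sketch (assuming kprobe_mutex is held, as
it is by the callers below):

        if (!list_empty(&op->list)) {
                if (optprobe_queued_unopt(op)) {
                        /* Queued for unoptimizing: detour code still live */
                } else {
                        /* Queued for optimizing: detour code not yet installed */
                }
        }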
@@ -631,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p)
                return;
 
        /* Check if it is already optimized. */
-       if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+       if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+               if (optprobe_queued_unopt(op)) {
+                       /* This is under unoptimizing. Just dequeue the probe */
+                       list_del_init(&op->list);
+               }
                return;
+       }
        op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 
-       if (!list_empty(&op->list))
-               /* This is under unoptimizing. Just dequeue the probe */
-               list_del_init(&op->list);
-       else {
-               list_add(&op->list, &optimizing_list);
-               kick_kprobe_optimizer();
-       }
+       /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+       if (WARN_ON_ONCE(!list_empty(&op->list)))
+               return;
+
+       list_add(&op->list, &optimizing_list);
+       kick_kprobe_optimizer();
 }
 
 /* Short cut to direct unoptimizing */
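
With this hunk applied, the tail of optimize_kprobe() (reconstructed
from the context and added lines above) reads: an already-OPTIMIZED
probe found on a queue can only be waiting for unoptimization, so
dequeueing it cancels the transition, while a non-OPTIMIZED probe on
any queue is now treated as a bug:

        /* Check if it is already optimized. */
        if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
                if (optprobe_queued_unopt(op)) {
                        /* This is under unoptimizing. Just dequeue the probe */
                        list_del_init(&op->list);
                }
                return;
        }
        op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

        /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
        if (WARN_ON_ONCE(!list_empty(&op->list)))
                return;

        list_add(&op->list, &optimizing_list);
        kick_kprobe_optimizer();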
@@ -649,6 +667,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
 {
        lockdep_assert_cpus_held();
        arch_unoptimize_kprobe(op);
+       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
        if (kprobe_disabled(&op->kp))
                arch_disarm_kprobe(&op->kp);
 }
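
The resulting helper, reconstructed from the hunk, drops the flag in
the same breath as removing the detour code, mirroring
do_unoptimize_kprobes() above:

static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
        lockdep_assert_cpus_held();
        arch_unoptimize_kprobe(op);
        /* The detour code is gone, so the flag must say so at once */
        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
        if (kprobe_disabled(&op->kp))
                arch_disarm_kprobe(&op->kp);
}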
@@ -662,31 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
                return; /* This is not an optprobe nor optimized */
 
        op = container_of(p, struct optimized_kprobe, kp);
-       if (!kprobe_optimized(p)) {
-               /* Unoptimized or unoptimizing case */
-               if (force && !list_empty(&op->list)) {
-                       /*
-                        * Only if this is unoptimizing kprobe and forced,
-                        * forcibly unoptimize it. (No need to unoptimize
-                        * unoptimized kprobe again :)
-                        */
-                       list_del_init(&op->list);
-                       force_unoptimize_kprobe(op);
-               }
+       if (!kprobe_optimized(p))
                return;
-       }
 
-       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
        if (!list_empty(&op->list)) {
-               /* Dequeue from the optimization queue */
-               list_del_init(&op->list);
+               if (optprobe_queued_unopt(op)) {
+                       /* Queued in unoptimizing queue */
+                       if (force) {
+                               /*
+                                * Forcibly unoptimize the kprobe here, and queue it
+                                * in the freeing list for release afterwards.
+                                */
+                               force_unoptimize_kprobe(op);
+                               list_move(&op->list, &freeing_list);
+                       }
+               } else {
+                       /* Dequeue from the optimizing queue */
+                       list_del_init(&op->list);
+                       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+               }
                return;
        }
+
        /* Optimized kprobe case */
-       if (force)
+       if (force) {
                /* Forcibly update the code: this is a special case */
                force_unoptimize_kprobe(op);
-       else {
+       } else {
                list_add(&op->list, &unoptimizing_list);
                kick_kprobe_optimizer();
        }
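
Putting the last hunk together, unoptimize_kprobe() now separates
three states. Reconstructed from the context and added lines, the
body after the container_of() reads:

        if (!kprobe_optimized(p))
                return;

        if (!list_empty(&op->list)) {
                if (optprobe_queued_unopt(op)) {
                        /* Queued in unoptimizing queue */
                        if (force) {
                                /*
                                 * Forcibly unoptimize the kprobe here, and queue it
                                 * in the freeing list for release afterwards.
                                 */
                                force_unoptimize_kprobe(op);
                                list_move(&op->list, &freeing_list);
                        }
                } else {
                        /* Dequeue from the optimizing queue */
                        list_del_init(&op->list);
                        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
                }
                return;
        }

        /* Optimized kprobe case */
        if (force) {
                /* Forcibly update the code: this is a special case */
                force_unoptimize_kprobe(op);
        } else {
                list_add(&op->list, &unoptimizing_list);
                kick_kprobe_optimizer();
        }

Note the asymmetry: a probe queued for unoptimizing is left alone
unless forced, since its detour code is still live and only the
optimizer (or a forced caller) may take it out.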