locking/osq: Use optimized spinning loop for arm64
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 6ef600a..1f77349 100644
@@ -134,20 +134,17 @@ bool osq_lock(struct optimistic_spin_queue *lock)
         * cmpxchg in an attempt to undo our queueing.
         */
 
-       while (!READ_ONCE(node->locked)) {
-               /*
-                * If we need to reschedule bail... so we can block.
-                * Use vcpu_is_preempted() to avoid waiting for a preempted
-                * lock holder:
-                */
-               if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
-                       goto unqueue;
-
-               cpu_relax();
-       }
-       return true;
+       /*
+        * Wait to acquire the lock or until cancellation. Note that
+        * need_resched() will come with an IPI, which will wake
+        * smp_cond_load_relaxed() if it is implemented with a monitor-wait.
+        * vcpu_is_preempted() relies on polling, however, so be careful.
+        */
+       if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
+                                 vcpu_is_preempted(node_cpu(node->prev))))
+               return true;
 
-unqueue:
+       /* unqueue */
        /*
         * Step - A  -- stabilize @prev
         *