x86/xen: cleanup includes in arch/x86/xen/spinlock.c
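Besides trimming the include list, the diff makes xen_qlock_wait() nestable: a per-CPU nesting counter ensures only the outermost caller clears a pending kicker event, so an NMI that re-enters the wait path cannot consume a wakeup the interrupted waiter still needs, and the paravirt lock hooks move from pv_lock_ops to pv_ops.lock. Below is a minimal user-space sketch of that nesting-counter pattern, not code from the patch; nest_cnt, fake_irq_pending and qlock_wait_sketch() are hypothetical stand-ins for the per-CPU counter and the Xen event-channel helpers.

/*
 * Sketch only (assumed user-space C11, not kernel code): only the
 * outermost call may clear the pending event; a nested call just
 * falls through to the "poll" branch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int nest_cnt;            /* models the per-CPU xen_qlock_wait_nest */
static bool fake_irq_pending = true;   /* models xen_test_irq_pending(irq) */

static void qlock_wait_sketch(void)
{
	atomic_fetch_add(&nest_cnt, 1);                 /* detect reentry */

	if (atomic_load(&nest_cnt) == 1 && fake_irq_pending) {
		/* Outermost call: safe to consume the pending event. */
		fake_irq_pending = false;
		puts("outermost call: cleared pending irq");
	} else {
		/* Nested call: clearing here could lose the outer wakeup. */
		puts("nested call: poll without clearing");
	}

	atomic_fetch_sub(&nest_cnt, 1);
}

int main(void)
{
	qlock_wait_sketch();                 /* plain call, counter goes 0 -> 1 */

	atomic_fetch_add(&nest_cnt, 1);      /* pretend an NMI interrupted us ... */
	qlock_wait_sketch();                 /* ... and re-entered the wait path */
	atomic_fetch_sub(&nest_cnt, 1);

	return 0;
}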
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 973f10e..3776122 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -3,24 +3,21 @@
  * Split spinlock implementation out into its own file, so it can be
  * compiled in a FTRACE-compatible way.
  */
-#include <linux/kernel_stat.h>
+#include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/debugfs.h>
-#include <linux/log2.h>
-#include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
 
-#include <xen/interface/xen.h>
 #include <xen/events.h>
 
 #include "xen-ops.h"
-#include "debugfs.h"
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
 static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
@@ -40,33 +37,24 @@ static void xen_qlock_kick(int cpu)
 static void xen_qlock_wait(u8 *byte, u8 val)
 {
        int irq = __this_cpu_read(lock_kicker_irq);
+       atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
 
        /* If kicker interrupts not initialized yet, just spin */
-       if (irq == -1)
+       if (irq == -1 || in_nmi())
                return;
 
-       /* clear pending */
-       xen_clear_irq_pending(irq);
-       barrier();
-
-       /*
-        * We check the byte value after clearing pending IRQ to make sure
-        * that we won't miss a wakeup event because of the clearing.
-        *
-        * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
-        * So it is effectively a memory barrier for x86.
-        */
-       if (READ_ONCE(*byte) != val)
-               return;
+       /* Detect reentry. */
+       atomic_inc(nest_cnt);
 
-       /*
-        * If an interrupt happens here, it will leave the wakeup irq
-        * pending, which will cause xen_poll_irq() to return
-        * immediately.
-        */
+       /* If the irq is already pending and we are not nested, clear it. */
+       if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
+               xen_clear_irq_pending(irq);
+       } else if (READ_ONCE(*byte) == val) {
+               /* Block until irq becomes pending (or a spurious wakeup) */
+               xen_poll_irq(irq);
+       }
 
-       /* Block until irq becomes pending (or perhaps a spurious wakeup) */
-       xen_poll_irq(irq);
+       atomic_dec(nest_cnt);
 }
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
@@ -141,11 +129,12 @@ void __init xen_init_spinlocks(void)
        printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
 
        __pv_init_lock_hash();
-       pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-       pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-       pv_lock_ops.wait = xen_qlock_wait;
-       pv_lock_ops.kick = xen_qlock_kick;
-       pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
+       pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+       pv_ops.lock.queued_spin_unlock =
+               PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+       pv_ops.lock.wait = xen_qlock_wait;
+       pv_ops.lock.kick = xen_qlock_kick;
+       pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 }
 
 static __init int xen_parse_nopvspin(char *arg)