powerpc/pseries: Move some PAPR paravirt functions to their own file
Author:     Nicholas Piggin <npiggin@gmail.com>
AuthorDate: Fri, 24 Jul 2020 13:14:18 +0000 (23:14 +1000)
Committer:  Michael Ellerman <mpe@ellerman.id.au>
CommitDate: Sun, 26 Jul 2020 13:34:26 +0000 (23:34 +1000)
These functions will be used by the queued spinlock implementation,
and may be useful elsewhere too, so move them out of spinlock.h.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200724131423.1362108-2-npiggin@gmail.com
arch/powerpc/include/asm/paravirt.h [new file with mode: 0644]
arch/powerpc/include/asm/spinlock.h
arch/powerpc/lib/locks.c
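
For context, these helpers combine into a standard "spin, then confer" pattern
on a shared-processor LPAR: a waiter reads the lock holder's yield count, and
if the count is odd (the holder's vCPU has been preempted by the hypervisor)
it donates its remaining cycles to that vCPU via H_CONFER. The sketch below is
illustrative only and is not part of this commit; struct demo_lock, its
holder_cpu field and demo_lock_yield() are hypothetical names used to show how
is_shared_processor(), yield_count_of() and yield_to_preempted() are meant to
be called together on CONFIG_PPC_SPLPAR builds. It mirrors the
splpar_spin_yield() conversion further down.

/*
 * Illustrative sketch only -- not from this commit. struct demo_lock,
 * holder_cpu and demo_lock_yield() are hypothetical.
 */
#include <linux/types.h>
#include <asm/paravirt.h>

struct demo_lock {
	int holder_cpu;		/* logical CPU currently holding the lock */
};

static void demo_lock_yield(struct demo_lock *lock)
{
	int holder = READ_ONCE(lock->holder_cpu);
	u32 yield_count;

	if (!is_shared_processor())
		return;		/* dedicated processors: just keep spinning */

	yield_count = yield_count_of(holder);
	if ((yield_count & 1) == 0)
		return;		/* even count: the holder's vCPU is running */

	/* Odd count: the holder is preempted, so confer our cycles to it. */
	yield_to_preempted(holder, yield_count);
}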

diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
new file mode 100644
index 0000000..339e853
--- /dev/null
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PARAVIRT_H
+#define _ASM_POWERPC_PARAVIRT_H
+
+#include <linux/jump_label.h>
+#include <asm/smp.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/hvcall.h>
+#endif
+
+#ifdef CONFIG_PPC_SPLPAR
+DECLARE_STATIC_KEY_FALSE(shared_processor);
+
+static inline bool is_shared_processor(void)
+{
+       return static_branch_unlikely(&shared_processor);
+}
+
+/* If bit 0 is set, the cpu has been preempted */
+static inline u32 yield_count_of(int cpu)
+{
+       __be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
+       return be32_to_cpu(yield_count);
+}
+
+static inline void yield_to_preempted(int cpu, u32 yield_count)
+{
+       plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+}
+#else
+static inline bool is_shared_processor(void)
+{
+       return false;
+}
+
+static inline u32 yield_count_of(int cpu)
+{
+       return 0;
+}
+
+extern void ___bad_yield_to_preempted(void);
+static inline void yield_to_preempted(int cpu, u32 yield_count)
+{
+       ___bad_yield_to_preempted(); /* This would be a bug */
+}
+#endif
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+       if (!is_shared_processor())
+               return false;
+       if (yield_count_of(cpu) & 1)
+               return true;
+       return false;
+}
+
+#endif /* _ASM_POWERPC_PARAVIRT_H */
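
Aside: the "#define vcpu_is_preempted vcpu_is_preempted" line above is the
usual idiom for overriding the generic fallback in include/linux/sched.h,
which simply returns false when an architecture provides no implementation.
Generic spinning code consults this hook to decide whether busy-waiting on a
lock owner is worthwhile. A minimal, hypothetical caller (not from this commit
or from the generic code) might look like:

static bool worth_spinning_on(int owner_cpu)
{
	/*
	 * Stop optimistic spinning if the owner's virtual CPU has been
	 * preempted by the hypervisor; it cannot release the lock until
	 * it runs again.
	 */
	return !vcpu_is_preempted(owner_cpu);
}
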
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 2d62089..79be9bb 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
  *
  * (the type definitions are in asm/spinlock_types.h)
  */
-#include <linux/jump_label.h>
 #include <linux/irqflags.h>
+#include <asm/paravirt.h>
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
-#include <asm/hvcall.h>
 #endif
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 #define LOCK_TOKEN     1
 #endif
 
-#ifdef CONFIG_PPC_PSERIES
-DECLARE_STATIC_KEY_FALSE(shared_processor);
-
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(int cpu)
-{
-       if (!static_branch_unlikely(&shared_processor))
-               return false;
-       return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
-}
-#endif
-
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
        return lock.slock == 0;
@@ -110,15 +97,6 @@ static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
 static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
 #endif
 
-static inline bool is_shared_processor(void)
-{
-#ifdef CONFIG_PPC_SPLPAR
-       return static_branch_unlikely(&shared_processor);
-#else
-       return false;
-#endif
-}
-
 static inline void spin_yield(arch_spinlock_t *lock)
 {
        if (is_shared_processor())
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 6440d59..04165b7 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -27,14 +27,14 @@ void splpar_spin_yield(arch_spinlock_t *lock)
                return;
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
-       yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
+
+       yield_count = yield_count_of(holder_cpu);
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
        if (lock->slock != lock_value)
                return;         /* something has changed */
-       plpar_hcall_norets(H_CONFER,
-               get_hard_smp_processor_id(holder_cpu), yield_count);
+       yield_to_preempted(holder_cpu, yield_count);
 }
 EXPORT_SYMBOL_GPL(splpar_spin_yield);
 
@@ -53,13 +53,13 @@ void splpar_rw_yield(arch_rwlock_t *rw)
                return;         /* no write lock at present */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
-       yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
+
+       yield_count = yield_count_of(holder_cpu);
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
        if (rw->lock != lock_value)
                return;         /* something has changed */
-       plpar_hcall_norets(H_CONFER,
-               get_hard_smp_processor_id(holder_cpu), yield_count);
+       yield_to_preempted(holder_cpu, yield_count);
 }
 #endif