/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>
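
/*
 * Enabled during pseries setup when the lppaca reports that the
 * partition runs on shared (rather than dedicated) processors.
 */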
DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}
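
/*
 * The count is kept in the target CPU's lppaca and is bumped by the
 * hypervisor on every dispatch and every preemption; handing the
 * sampled value back to H_CONFER below lets the hypervisor ignore a
 * confer that raced with the target being dispatched again.
 */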

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so
 * recursing on that lock will cause the task to queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
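
/*
 * H_PROD marks the target vCPU runnable again; per the comment above,
 * the unlock slowpath uses it to wake a waiter that has yielded.
 */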
static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
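
/*
 * Illustrative sketch (not part of this header) of how a lock slowpath
 * can use these helpers, in the style of splpar_spin_yield() in
 * arch/powerpc/lib/locks.c. `holder_cpu` stands for whichever CPU the
 * lock word says currently holds the lock:
 *
 *	u32 yield_count = yield_count_of(holder_cpu);
 *
 *	if ((yield_count & 1) == 0)
 *		return;		// even: holder vCPU is running, keep spinning
 *	// Holder is preempted: confer our cycles to it. If the sampled
 *	// count went stale, the H_CONFER is simply a no-op.
 *	yield_to_preempted(holder_cpu, yield_count);
 */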
#else
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}
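
/*
 * The ___bad_*() helpers below are declared but deliberately never
 * defined: any call the compiler cannot eliminate fails at link time,
 * catching uses of these paths in !CONFIG_PPC_SPLPAR builds.
 */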
extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif
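
/*
 * Defining vcpu_is_preempted to its own name tells generic code (the
 * "#ifndef vcpu_is_preempted" fallback in linux/sched.h) that the
 * architecture supplies a real implementation.
 */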
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu = cpu_first_thread_sibling(smp_processor_id());

		/*
		 * Preemption can only happen at core granularity. This CPU
		 * is not preempted if one of the CPUs of this core is not
		 * preempted.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}
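
/*
 * On a dedicated-processor LPAR the native unlock fast path is always
 * safe; only a shared-processor LPAR needs the paravirt handling.
 */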
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */