1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_CPUTHREADS_H
3 #define _ASM_POWERPC_CPUTHREADS_H
6 #include <linux/cpumask.h>
7 #include <asm/cpu_has_feature.h>
/*
 * Mapping of threads to cores
 *
 * Note: This implementation is limited to a power of 2 number of
 * threads per core and the same number for each core in the system
 * (though it would work if some processors had less threads as long
 * as the CPU numbers are still allocated, just not brought online).
 *
 * However, the API allows for a different implementation in the future
 * if needed, as long as you only use the functions and not the variables
 * directly.
 */
/*
 * SMP builds discover the thread topology at boot; UP builds collapse
 * everything to a single thread per core via constant macros so callers
 * compile identically either way.
 */
#ifdef CONFIG_SMP
extern int threads_per_core;
extern int threads_per_subcore;
extern int threads_shift;
/* NOTE(review): implied by the UP stub below — confirm against upstream */
extern bool has_big_cores;
extern cpumask_t threads_core_mask;
#else
#define threads_per_core	1
#define threads_per_subcore	1
#define threads_shift		0
#define has_big_cores		0
#define threads_core_mask	(*get_cpu_mask(0))
#endif
35 /* cpu_thread_mask_to_cores - Return a cpumask of one per cores
38 * @threads: a cpumask of online threads
40 * This function returns a cpumask which will have one online cpu's
41 * bit set for each core that has at least one thread set in the argument.
43 * This can typically be used for things like IPI for tlb invalidations
44 * since those need to be done only once per core/TLB
46 static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
52 for (i = 0; i < NR_CPUS; i += threads_per_core) {
53 cpumask_shift_left(&tmp, &threads_core_mask, i);
54 if (cpumask_intersects(threads, &tmp)) {
55 cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
57 cpumask_set_cpu(cpu, &res);
63 static inline int cpu_nr_cores(void)
65 return nr_cpu_ids >> threads_shift;
68 static inline cpumask_t cpu_online_cores_map(void)
70 return cpu_thread_mask_to_cores(cpu_online_mask);
/*
 * Core index <-> first-thread mapping. Real implementations live in C
 * for SMP; UP degenerates to the identity since each "core" is one CPU.
 */
#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
#else
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif
81 static inline int cpu_thread_in_core(int cpu)
83 return cpu & (threads_per_core - 1);
86 static inline int cpu_thread_in_subcore(int cpu)
88 return cpu & (threads_per_subcore - 1);
91 static inline int cpu_first_thread_sibling(int cpu)
93 return cpu & ~(threads_per_core - 1);
96 static inline int cpu_last_thread_sibling(int cpu)
98 return cpu | (threads_per_core - 1);
102 * tlb_thread_siblings are siblings which share a TLB. This is not
103 * architected, is not something a hypervisor could emulate and a future
104 * CPU may change behaviour even in compat mode, so this should only be
105 * used on PowerNV, and only with care.
107 static inline int cpu_first_tlb_thread_sibling(int cpu)
109 if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
110 return cpu & ~0x6; /* Big Core */
112 return cpu_first_thread_sibling(cpu);
115 static inline int cpu_last_tlb_thread_sibling(int cpu)
117 if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
118 return cpu | 0x6; /* Big Core */
120 return cpu_last_thread_sibling(cpu);
123 static inline int cpu_tlb_thread_sibling_step(void)
125 if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
126 return 2; /* Big Core */
/*
 * NOTE(review): the conditional-compilation guards around this section
 * were reconstructed (64-bit book3e builds) — confirm against upstream.
 */
#ifdef CONFIG_PPC64
/*
 * Read the Thread Enable Status Register. Without SMT support the
 * register is not read and thread 0 is reported as the only enabled
 * thread (mask 1).
 */
static inline u32 get_tensr(void)
{
#ifdef CONFIG_SMP
	if (cpu_has_feature(CPU_FTR_SMT))
		return mfspr(SPRN_TENSR);
#endif
	return 1;
}

void book3e_start_thread(int thread, unsigned long addr);
void book3e_stop_thread(int thread);
#endif
143 #endif /* __ASSEMBLY__ */
145 #define INVALID_THREAD_HWID 0x0fff
147 #endif /* _ASM_POWERPC_CPUTHREADS_H */