// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>

#include <asm/io_apic.h>

#include "local.h"
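
/*
 * Static key which gates the use of IPI shorthands. The branch is patched
 * at runtime, so the senders on the fast path pay nothing for the check.
 */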
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);
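
/*
 * "no_ipi_broadcast=1" on the kernel command line sets the flag above and
 * thereby forces all multi-target IPIs to be sent via destination masks
 * instead of the ALLBUT/ALLINC shorthands.
 */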

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);
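
/*
 * Note: the message above only reflects the command line setting. Whether
 * shorthands are actually used is additionally gated by the static key,
 * which apic_smt_update() below keeps in sync with the CPU population.
 */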

void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}
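
/*
 * apic_smt_update() is expected to be re-evaluated on CPU hotplug
 * transitions so that the static key tracks the current CPU population.
 */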

void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}
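
/*
 * A shorthand can only be used when the target mask covers every online
 * CPU, with the sole permissible exception of the sending CPU itself,
 * because the ALLBUT/ALLINC shorthands cannot exclude arbitrary CPUs.
 */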
void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

#endif /* CONFIG_SMP */
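
/*
 * ICR2 is the upper half of the 64-bit xAPIC interrupt command register;
 * it carries the destination field (bits 56 to 63 of the full ICR).
 */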
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}
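
/*
 * In xAPIC mode the Delivery Status bit (APIC_ICR_BUSY) must be polled
 * until the previous IPI has been dispatched before a new command can be
 * written to the ICR.
 */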
static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/* Wait for the ICR to become idle. */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/* Send the IPI. The write to APIC_ICR fires this off. */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/* Wait for the ICR to become idle. */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/* Prepare the target chip field. */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/* Program the ICR. */
	cfg = __prepare_ICR(0, vector, dest);

	/* Send the IPI. The write to APIC_ICR fires this off. */
	native_apic_mem_write(APIC_ICR, cfg);
}
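
/*
 * The ICR2 write and the ICR write above form a non-atomic pair, so callers
 * run with interrupts disabled to keep the sequence from being interleaved
 * with another IPI. The helpers below do that with local_irq_save().
 */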

void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
					   int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}
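
/*
 * Shorthand based senders: APIC_DEST_ALLBUT, APIC_DEST_ALLINC and
 * APIC_DEST_SELF select the destination shorthand encoding in the ICR, so
 * no destination field (and thus no ICR2 write) is required.
 */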
void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do unicasts to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
					      int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, APIC_DEST_LOGICAL);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines: flat logical destination mode
 * assigns each CPU one bit in the 8-bit destination field, so at most
 * eight CPUs can be addressed this way.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}
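
/*
 * Best-effort determination of the current CPU number, for contexts where
 * the normal per-CPU access may not be trusted (e.g. crash/reboot paths);
 * any failure falls back to CPU 0.
 */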
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif /* CONFIG_X86_32 */