// SPDX-License-Identifier: GPL-2.0
/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/nmi.h>
#include <linux/cpuhotplug.h>
#include <linux/stackprotector.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>

#include <asm/spec-ctrl.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "pmu.h"

cpumask_var_t xen_cpu_initialized_map;
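
/*
 * Per-CPU bookkeeping for the PV-only interrupt sources (the irq_work
 * IPI and the Xen PMU VIRQ).  .irq == -1 means "not bound"; .name is
 * kasprintf()-allocated and owned by this structure until teardown.
 */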
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
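
/*
 * First C code run by a secondary VCPU, reached via cpu_bringup_and_idle()
 * on the idle task's stack: finish per-CPU setup, publish the CPU's data,
 * mark it online, and only then enable interrupts.
 */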
static void cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
        if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
                xen_enable_sysenter();
                xen_enable_syscall();
        }
        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        speculative_store_bypass_ht_init();

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        cpu_set_state_online(cpu);  /* Implies full memory barrier. */

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();
}

asmlinkage __visible void cpu_bringup_and_idle(void)
{
        cpu_bringup();
        boot_init_stack_canary();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
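
/*
 * Unbind the PV-specific per-CPU interrupts.  The .irq >= 0 checks make
 * this safe for a CPU that was never (fully) initialized, so it also
 * serves as the error path of xen_smp_intr_init_pv().
 */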
void xen_smp_intr_free_pv(unsigned int cpu)
{
        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
                kfree(per_cpu(xen_irq_work, cpu).name);
                per_cpu(xen_irq_work, cpu).name = NULL;
        }

        if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
                per_cpu(xen_pmu_irq, cpu).irq = -1;
                kfree(per_cpu(xen_pmu_irq, cpu).name);
                per_cpu(xen_pmu_irq, cpu).name = NULL;
        }
}
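
/*
 * Bind the PV-specific per-CPU interrupts: the irq_work IPI always, and
 * the XENPMU VIRQ when is_xen_pmu() reports an active vPMU for this CPU.
 * The irq number and name are stashed per CPU for later teardown.
 */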
int xen_smp_intr_init_pv(unsigned int cpu)
{
        int rc;
        char *callfunc_name, *pmu_name;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu).irq = rc;
        per_cpu(xen_irq_work, cpu).name = callfunc_name;

        if (is_xen_pmu(cpu)) {
                pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
                rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
                                             xen_pmu_irq_handler,
                                             IRQF_PERCPU|IRQF_NOBALANCING,
                                             pmu_name, NULL);
                if (rc < 0)
                        goto fail;
                per_cpu(xen_pmu_irq, cpu).irq = rc;
                per_cpu(xen_pmu_irq, cpu).name = pmu_name;
        }

        return 0;

 fail:
        xen_smp_intr_free_pv(cpu);
        return rc;
}
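
/*
 * domU only: any VCPU that exists answers VCPUOP_is_up with a
 * non-negative result, whether it is currently up or not, so each such
 * VCPU is marked possible.  Dom0 is handled by xen_filter_cpu_maps()
 * instead.
 */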
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}
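
/*
 * Dom0 counterpart of xen_fill_possible_map(): recount num_processors
 * and trim the possible/present maps down to the VCPUs the hypervisor
 * actually provides (e.g. under a dom0_max_vcpus= limit).
 */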
static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /* This is akin to using 'nr_cpus' on the Linux command line: with
         * 'dom0_max_vcpus=X' we can only have up to X VCPUs, while
         * nr_cpu_ids may be greater than X.  This is normally not a
         * problem, except when CPU hotplug is involved: then there may be
         * more than X CPUs configured for the guest, which cannot work as
         * there is no hypercall to expand the maximum number of VCPUs of
         * an already-running guest.  So cap nr_cpu_ids at X. */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}
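
/*
 * Boot-CPU half of the smp_ops setup: reclaim the early GDT page,
 * correct the data segments Xen handed us, and set up vcpu_info
 * placement and the PV spinlocks before alternatives patching runs.
 */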
static void __init xen_pv_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        if (!xen_feature(XENFEAT_writable_page_tables))
                /* We've switched to the "real" per-cpu gdt, so make
                 * sure the old memory can be recycled. */
                make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
        /*
         * Xen starts us with XEN_FLAT_RING1_DS, but linux code
         * expects __USER_DS
         */
        loadsegment(ds, __USER_DS);
        loadsegment(es, __USER_DS);
#endif

        xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();

        /*
         * The alternative logic (which patches the unlock/lock) runs before
         * the SMP bootup code is activated. Hence we need to set this up
         * before the core kernel is patched. Otherwise only modules would
         * be patched, but not core code.
         */
        xen_init_spinlocks();
}
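
/*
 * Reject configurations Xen cannot honour (nosmp/noapic), allocate the
 * topology masks, bind CPU0's PV interrupts, and shrink the possible
 * map so that at most max_cpus VCPUs are brought up.
 */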
static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_boot_cpu_info();
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        speculative_store_bypass_ht_init();

        xen_pmu_init(0);

        if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}
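
/*
 * Build the initial register and descriptor state for a new VCPU and
 * register it with the hypervisor via VCPUOP_initialise.  This replaces
 * the INIT/SIPI sequence of native bringup: once unpaused, the VCPU
 * starts executing at cpu_bringup_and_idle().
 */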
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        /* used to tell cpu_init() that it can proceed with initialization */
        cpumask_set_cpu(cpu, cpu_callout_mask);
        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_rw(cpu);

#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        /*
         * Bring up the CPU in cpu_bringup_and_idle() with the stack
         * pointing just below where pt_regs would be if it were a normal
         * kernel entry.
         */
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
        ctxt->user_regs.ds = __USER_DS;
        ctxt->user_regs.es = __USER_DS;
        ctxt->user_regs.ss = __KERNEL_DS;
        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);

        xen_copy_trap_info(ctxt->trap_ctxt);

        ctxt->ldt_ents = 0;

        BUG_ON((unsigned long)gdt & ~PAGE_MASK);

        gdt_mfn = arbitrary_virt_to_mfn(gdt);
        make_lowmem_page_readonly(gdt);
        make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

        ctxt->gdt_frames[0] = gdt_mfn;
        ctxt->gdt_ents = GDT_ENTRIES;

        /*
         * Set SS:SP that Xen will use when entering guest kernel mode
         * from guest user mode.  Subsequent calls to load_sp0() can
         * change this value.
         */
        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = task_top_of_stack(idle);

#ifdef CONFIG_X86_32
        ctxt->event_callback_cs = __KERNEL_CS;
        ctxt->failsafe_callback_cs = __KERNEL_CS;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->event_callback_eip =
                (unsigned long)xen_hypervisor_callback;
        ctxt->failsafe_callback_eip =
                (unsigned long)xen_failsafe_callback;

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);

        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}
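
/*
 * Bring a secondary CPU online: initialize its context (only once per
 * CPU), kick it with VCPUOP_up, and yield to the hypervisor until the
 * new CPU has marked itself CPU_ONLINE in cpu_bringup().
 */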
static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        common_cpu_up(cpu, idle);

        xen_setup_runstate_info(cpu);

        /*
         * PV VCPUs are always successfully taken down (see 'while' loop
         * in xen_cpu_die()), so -EBUSY is an error.
         */
        rc = cpu_check_up_prepare(cpu);
        if (rc)
                return rc;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        xen_pmu_init(cpu);

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
        BUG_ON(rc);

        while (cpu_report_state(cpu) != CPU_ONLINE)
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_pv_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_pv_cpu_die(unsigned int cpu)
{
        while (HYPERVISOR_vcpu_op(VCPUOP_is_up,
                                  xen_vcpu_nr(cpu), NULL)) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/10);
        }

        if (common_cpu_die(cpu) == 0) {
                xen_smp_intr_free(cpu);
                xen_uninit_lock_cpu(cpu);
                xen_teardown_timer(cpu);
                xen_pmu_finish(cpu);
        }
}

static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
        cpu_bringup();
        /*
         * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
         * clears certain data that the cpu_idle loop (which called us
         * and which we return to) expects. The only way to get that
         * data back is to call:
         */
        tick_nohz_idle_enter();
        tick_nohz_idle_stop_tick_protected();

        cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_pv_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_pv_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_pv_play_dead(void)
{
        BUG();
}

#endif
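
/*
 * Shutdown/reboot path: instead of a reboot IPI, each CPU switches to
 * swapper_pg_dir (so it does not pin a dying mm) and then takes itself
 * down with VCPUOP_down.
 */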
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
        BUG();
}

static void xen_pv_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}
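
/*
 * Handler for the XEN_IRQ_WORK_VECTOR IPI bound in
 * xen_smp_intr_init_pv(): run the pending irq_work items and account
 * the interrupt via apic_irq_work_irqs, as the native vector does.
 */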
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}
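
/*
 * PV flavour of smp_ops.  The xen_pv_* callbacks are local to this
 * file; the xen_smp_* entries are the event-channel based helpers
 * shared with the HVM variant of smp_ops.
 */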
static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_pv_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_pv_cpu_up,
        .cpu_die = xen_pv_cpu_die,
        .cpu_disable = xen_pv_cpu_disable,
        .play_dead = xen_pv_play_dead,

        .stop_other_cpus = xen_pv_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};
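
/*
 * Install the PV smp_ops during early boot and seed the possible map
 * (for domU) before the rest of SMP setup consults it.
 */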
void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
}