// SPDX-License-Identifier: GPL-2.0-or-later
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
#include <asm/io_bitmap.h>

/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     "ret\n\t"
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");
49 /* stub always returning 0. */
50 asm (".pushsection .entry.text, \"ax\"\n"
51 ".global paravirt_ret0\n"
53 "xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
55 ".size paravirt_ret0, . - paravirt_ret0\n\t"
56 ".type paravirt_ret0, @function\n\t"

void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
}

/* Undefined instruction for dealing with missing ops pointers. */
noinstr void paravirt_BUG(void)
{
	BUG();
}

struct branch {
	unsigned char opcode;
	u32 delta;
} __attribute__((packed));

static unsigned paravirt_patch_call(void *insn_buff, const void *target,
				    unsigned long addr, unsigned len)
{
	const int call_len = 5;
	struct branch *b = insn_buff;
	unsigned long delta = (unsigned long)target - (addr+call_len);

	if (len < call_len) {
		pr_warn("paravirt: Failed to patch indirect CALL at %ps\n", (void *)addr);
		/* Kernel might not be viable if patching fails, bail out: */
		BUG_ON(1);
	}

	b->opcode = 0xe8; /* call */
	b->delta = delta;
	BUILD_BUG_ON(sizeof(*b) != call_len);

	return call_len;
}
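
/*
 * Example with made-up addresses: patching a call site at addr 0x1000
 * to target 0x1100 gives delta = 0x1100 - (0x1000 + 5) = 0xfb, so the
 * five bytes written are "e8 fb 00 00 00": a direct "call rel32"
 * replacing the slower indirect call through pv_ops.
 */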

#ifdef CONFIG_PARAVIRT_XXL
/* identity function, which can be inlined */
u64 notrace _paravirt_ident_64(u64 x)
{
	return x;
}
#endif

DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}

unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
			    unsigned int len)
{
	/*
	 * Neat trick to map patch type back to the call within the
	 * corresponding structure.
	 */
	void *opfunc = *((void **)&pv_ops + type);
	unsigned int ret;

	if (opfunc == NULL)
		/* If there's no function, patch it with paravirt_BUG() */
		ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len);
	else if (opfunc == _paravirt_nop)
		ret = 0;
	else
		/* Otherwise call the function. */
		ret = paravirt_patch_call(insn_buff, opfunc, addr, len);

	return ret;
}
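
/*
 * Example of the "neat trick": pv_ops is viewed as a flat array of
 * function pointers and the patch type is the slot index, i.e.
 *
 *	type = PARAVIRT_PATCH(cpu.io_delay)
 *	     = offsetof(struct paravirt_patch_template, cpu.io_delay)
 *	       / sizeof(void *)
 *
 * so "*((void **)&pv_ops + type)" lands back on pv_ops.cpu.io_delay.
 */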

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
	return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void))
{
	static_call_update(pv_sched_clock, func);
}
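
/*
 * Example (hypothetical callback name): a guest with a paravirtual
 * clocksource switches sched_clock over during early boot:
 *
 *	paravirt_set_sched_clock(my_hv_sched_clock);
 *
 * static_call_update() rewrites the pv_sched_clock call sites in
 * place, so later sched_clock() reads take no indirect branch.
 */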

/* These are in entry.S */
extern void native_iret(void);

static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}
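
/*
 * Example: a guest platform's early setup code claims the whole port
 * range before any legacy driver can probe:
 *
 *	if (paravirt_disable_iospace())
 *		pr_warn("paravirt: could not reserve ioports\n");
 *
 * With 0..IO_SPACE_LIMIT marked busy in ioport_resource, subsequent
 * request_region() calls from those drivers simply fail.
 */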

static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
	leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}
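
/*
 * Example: lazy MMU mode lets a hypervisor batch page table updates
 * instead of trapping on every one. A typical caller looks like:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (...)
 *		set_pte_at(mm, addr, ptep, pte);	// queued, not applied
 *	arch_leave_lazy_mmu_mode();			// single flush here
 *
 * paravirt_flush_lazy_mmu() pushes the queue out mid-batch by
 * bouncing through leave/enter without giving up lazy mode.
 */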

#ifdef CONFIG_PARAVIRT_XXL
void paravirt_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	leave_lazy(PARAVIRT_LAZY_CPU);

	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}
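
/*
 * Example flow: if prev is switched out mid-batch,
 * paravirt_start_context_switch() flushes the pending updates and tags
 * prev with TIF_LAZY_MMU_UPDATES; when that task is later switched back
 * in as "next", paravirt_end_context_switch() sees the flag and
 * re-enters lazy MMU mode, so the interrupted batch resumes
 * transparently.
 */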

static noinstr unsigned long pv_native_read_cr2(void)
{
	return native_read_cr2();
}

static noinstr void pv_native_write_cr2(unsigned long val)
{
	native_write_cr2(val);
}

static noinstr unsigned long pv_native_get_debugreg(int regno)
{
	return native_get_debugreg(regno);
}

static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
	native_set_debugreg(regno, val);
}

static noinstr void pv_native_irq_enable(void)
{
	native_irq_enable();
}

static noinstr void pv_native_irq_disable(void)
{
	native_irq_disable();
}
#endif

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	if (in_interrupt())
		return PARAVIRT_LAZY_NONE;

	return this_cpu_read(paravirt_lazy_mode);
}

struct pv_info pv_info = {
	.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
	.extra_user_64bit_cs = __USER_CS,
#endif
};

/* 64-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
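
/*
 * Example: on bare hardware a page table entry is just its raw 64-bit
 * value, so the pmd_val/make_pmd/... hooks below collapse to the
 * inlinable identity function, conceptually
 *
 *	pmd_val(pmd) == _paravirt_ident_64(pmd.pmd)
 *
 * whereas e.g. Xen installs hooks here to translate between guest
 * pseudo-physical and machine frame numbers.
 */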

struct paravirt_patch_template pv_ops = {
	/* Cpu ops. */
	.cpu.io_delay = native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
	.cpu.cpuid = native_cpuid,
	.cpu.get_debugreg = pv_native_get_debugreg,
	.cpu.set_debugreg = pv_native_set_debugreg,
	.cpu.read_cr0 = native_read_cr0,
	.cpu.write_cr0 = native_write_cr0,
	.cpu.write_cr4 = native_write_cr4,
	.cpu.wbinvd = native_wbinvd,
	.cpu.read_msr = native_read_msr,
	.cpu.write_msr = native_write_msr,
	.cpu.read_msr_safe = native_read_msr_safe,
	.cpu.write_msr_safe = native_write_msr_safe,
	.cpu.read_pmc = native_read_pmc,
	.cpu.load_tr_desc = native_load_tr_desc,
	.cpu.set_ldt = native_set_ldt,
	.cpu.load_gdt = native_load_gdt,
	.cpu.load_idt = native_load_idt,
	.cpu.store_tr = native_store_tr,
	.cpu.load_tls = native_load_tls,
	.cpu.load_gs_index = native_load_gs_index,
	.cpu.write_ldt_entry = native_write_ldt_entry,
	.cpu.write_gdt_entry = native_write_gdt_entry,
	.cpu.write_idt_entry = native_write_idt_entry,

	.cpu.alloc_ldt = paravirt_nop,
	.cpu.free_ldt = paravirt_nop,

	.cpu.load_sp0 = native_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
	.cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap,
	.cpu.update_io_bitmap = native_tss_update_io_bitmap,
#endif

	.cpu.start_context_switch = paravirt_nop,
	.cpu.end_context_switch = paravirt_nop,

	/* Irq ops. */
	.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
	.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
	.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
	.irq.safe_halt = native_safe_halt,
	.irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

	/* Mmu ops. */
	.mmu.flush_tlb_user = native_flush_tlb_local,
	.mmu.flush_tlb_kernel = native_flush_tlb_global,
	.mmu.flush_tlb_one_user = native_flush_tlb_one_user,
	.mmu.flush_tlb_multi = native_flush_tlb_multi,
	.mmu.tlb_remove_table =
			(void (*)(struct mmu_gather *, void *))tlb_remove_page,

	.mmu.exit_mmap = paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
	.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
	.mmu.write_cr2 = pv_native_write_cr2,
	.mmu.read_cr3 = __native_read_cr3,
	.mmu.write_cr3 = native_write_cr3,

	.mmu.pgd_alloc = __paravirt_pgd_alloc,
	.mmu.pgd_free = paravirt_nop,

	.mmu.alloc_pte = paravirt_nop,
	.mmu.alloc_pmd = paravirt_nop,
	.mmu.alloc_pud = paravirt_nop,
	.mmu.alloc_p4d = paravirt_nop,
	.mmu.release_pte = paravirt_nop,
	.mmu.release_pmd = paravirt_nop,
	.mmu.release_pud = paravirt_nop,
	.mmu.release_p4d = paravirt_nop,

	.mmu.set_pte = native_set_pte,
	.mmu.set_pmd = native_set_pmd,

	.mmu.ptep_modify_prot_start = __ptep_modify_prot_start,
	.mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.mmu.set_pud = native_set_pud,

	.mmu.pmd_val = PTE_IDENT,
	.mmu.make_pmd = PTE_IDENT,

	.mmu.pud_val = PTE_IDENT,
	.mmu.make_pud = PTE_IDENT,

	.mmu.set_p4d = native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.mmu.p4d_val = PTE_IDENT,
	.mmu.make_p4d = PTE_IDENT,

	.mmu.set_pgd = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

	.mmu.pte_val = PTE_IDENT,
	.mmu.pgd_val = PTE_IDENT,

	.mmu.make_pte = PTE_IDENT,
	.mmu.make_pgd = PTE_IDENT,

	.mmu.dup_mmap = paravirt_nop,
	.mmu.activate_mm = paravirt_nop,

	.mmu.lazy_mode = {
		.enter = paravirt_nop,
		.leave = paravirt_nop,
		.flush = paravirt_nop,
	},

	.mmu.set_fixmap = native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	/* Lock ops. */
#ifdef CONFIG_SMP
	.lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.lock.queued_spin_unlock =
				PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.lock.wait = paravirt_nop,
	.lock.kick = paravirt_nop,
	.lock.vcpu_is_preempted =
				PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};

#ifdef CONFIG_PARAVIRT_XXL
NOKPROBE_SYMBOL(native_load_idt);
#endif

void (*paravirt_iret)(void) = native_iret;

EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);
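
/*
 * Example (loosely based on how Xen wires itself up): a hypervisor
 * guest overrides individual slots early in boot, before the call
 * sites are patched, e.g.
 *
 *	pv_ops.mmu.set_pte = xen_set_pte;
 *	pv_ops.irq.irq_disable = PV_CALLEE_SAVE(xen_irq_disable);
 *
 * Slots left at their native/nop defaults above are then turned into
 * direct calls (or nothing at all) by paravirt_patch().
 */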