x86/paravirt: Move the Xen-only pv_mmu_ops under the PARAVIRT_XXL umbrella
arch/x86/include/asm/paravirt.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, time.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, time.steal_clock, cpu);
}
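
/*
 * Both time hooks above default to the native implementations and are only
 * replaced when a hypervisor guest installs its own callbacks at boot.  A
 * hedged sketch (guest_sched_clock() and guest_steal_clock() are made-up
 * names, not functions from this file):
 *
 *      pv_ops.time.sched_clock = guest_sched_clock;
 *      pv_ops.time.steal_clock = guest_steal_clock;
 *      static_key_slow_inc(&paravirt_steal_enabled);
 */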

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
#endif
}
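
/*
 * For reference: on bare metal the io_delay hook points at native_io_delay(),
 * which traditionally writes to port 0x80, i.e. roughly (illustrative sketch
 * only, not code from this file):
 *
 *      asm volatile ("outb %al, $0x80");
 *
 * A hypervisor can replace it with a cheaper delay or a no-op.
 */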

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
        PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    const struct flush_tlb_info *info)
{
        PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(mmu.exit_mmap, mm);
}
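
/*
 * The mmu hooks above remain available to all CONFIG_PARAVIRT guests; the
 * Xen-only mmu operations further down are guarded by CONFIG_PARAVIRT_XXL.
 * A guest that only wants to accelerate remote TLB shootdown can therefore
 * override a single hook, along the lines of (hypothetical callback name):
 *
 *      pv_ops.mmu.flush_tlb_others = my_flush_tlb_others;
 */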

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
        PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(cpu.set_debugreg, reg, val);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
        return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, cpu.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(cpu.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
        return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
                                      unsigned low, unsigned high)
{
        PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
                                          unsigned low, unsigned high)
{
        return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)                  \
do {                                            \
        u64 _l = paravirt_read_msr(msr);        \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        val = paravirt_read_msr(msr);           \
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
        wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)   paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr_safe(msr, &_err);    \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_safe(msr, &err);
        return err;
}
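
/*
 * Typical use of the *_safe() variants, which report a fault instead of
 * letting a missing MSR raise #GP (usage sketch; MSR_FOO is a stand-in
 * name for a real MSR constant):
 *
 *      u64 val;
 *
 *      if (rdmsrl_safe(MSR_FOO, &val))
 *              return -EIO;    // MSR not present on this CPU
 */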

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(cpu.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(cpu.set_iopl_mask, mask);
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
        PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);

        return ret;
}
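
/*
 * The sizeof() comparison above is resolved at compile time: with 32-bit PAE
 * a pteval_t is 64 bits wide but registers are only 32 bits, so the value is
 * split across two argument registers.  Worked example (not code from this
 * file):
 *
 *      pte_t pte = __pte(0x8000000000000025ULL);
 *      // low word  0x00000025 goes in the first argument register and
 *      // high word 0x80000000 in the second; on 64-bit builds the other
 *      // branch is dead code and a single register carries the value.
 */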

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);

        return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pud, pudp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, mmu.make_pud, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, mmu.pud_val,
                                   pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);

        return ret;
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
        p4dval_t val = native_p4d_val(p4d);

        if (sizeof(p4dval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_p4d, p4dp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
        p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

        return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
        return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {                                      \
        if (pgtable_l5_enabled())                                       \
                __set_pgd(pgdp, pgdval);                                \
        else                                                            \
                set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });     \
} while (0)

#define pgd_clear(pgdp) do {                                            \
        if (pgtable_l5_enabled())                                       \
                set_pgd(pgdp, __pgd(0));                                \
} while (0)
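
/*
 * When 5-level paging is disabled at runtime the PGD is folded into the P4D,
 * so the two macros above degrade to p4d operations.  Illustrative only:
 *
 *      pgd_clear(pgd);                 // no-op unless pgtable_l5_enabled()
 *      set_pgd(pgd, __pgd(0));         // becomes set_p4d((p4d_t *)pgd, ...)
 *                                      // on 4-level configurations
 */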

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
        set_p4d(p4dp, __p4d(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS >= 4 */

#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/*
 * Special-case pte-setting operations for PAE, which can't update a
 * 64-bit pte atomically.
 */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(cpu.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
                                                        u32 val)
{
        PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
        PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
        PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
        PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
        return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */
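
/*
 * Of the lock hooks above, only queued_spin_unlock and vcpu_is_preempted use
 * the callee-save PVOP variants, so the common uncontended paths stay cheap.
 * A KVM-style guest would override the wait/kick pair roughly like this
 * (hypothetical callback names, sketch only):
 *
 *      pv_ops.lock.wait = guest_halt_wait;
 *      pv_ops.lock.kick = guest_kick_cpu;
 */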

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/*
 * We save some registers, but not all of them - that would be too much.
 * We clobber all caller-saved registers except the argument register.
 */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
                                                                        \
        asm(".pushsection .text;"                                       \
            ".globl " PV_THUNK_NAME(func) ";"                           \
            ".type " PV_THUNK_NAME(func) ", @function;"                 \
            PV_THUNK_NAME(func) ":"                                     \
            FRAME_BEGIN                                                 \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            FRAME_END                                                   \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })
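
/*
 * Putting the three macros above together, an ordinary C function is made
 * safe to install in a callee-save paravirt slot like this (sketch;
 * my_unlock() is a made-up example function, not part of this header):
 *
 *      static void my_unlock(struct qspinlock *lock) { ... }
 *      PV_CALLEE_SAVE_REGS_THUNK(my_unlock);
 *
 *      pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(my_unlock);
 *
 * __PV_IS_CALLEE_SAVE() skips the thunk for functions that already obey the
 * restricted convention (typically small assembler stubs).
 */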

#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}
#endif


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)         \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
        .popsection
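
/*
 * Each _PVSITE use emits the original instruction sequence plus a record in
 * the .parainstructions section describing it, so that apply_paravirt() can
 * later patch the site in place.  Conceptually the record looks like this
 * (layout sketched from the macro above; field names are descriptive only):
 *
 *      struct {
 *              word    instr;          // address of label 771 (the insns)
 *              u8      type;           // ptype: which pv op this site uses
 *              u8      len;            // 772b-771b: patchable length
 *      };
 */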


#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)         ((off) / 8)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)         ((off) / 4)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(PV_CPU_iret),                              \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),                       \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);        \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),                        \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);         \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
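
/*
 * The asm macros above are meant to be used from entry code, where the
 * clobber argument tells PV_SAVE_REGS()/PV_RESTORE_REGS() which registers
 * the call site can afford to lose, e.g. (usage sketch; the clobber choice
 * depends on the call site):
 *
 *      DISABLE_INTERRUPTS(CLBR_ANY)
 *      ...
 *      ENABLE_INTERRUPTS(CLBR_ANY)
 */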

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs),                            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);             \
                 )
#endif

#define GET_CR2_INTO_RAX                                \
        ANNOTATE_RETPOLINE_SAFE;                        \
        call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);

#ifdef CONFIG_PARAVIRT_XXL
#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),                   \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
#endif

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)                                        \
        PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),                       \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
                  ANNOTATE_RETPOLINE_SAFE;                          \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);        \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#endif  /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */