4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
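 *
 * For example, xen_make_pte() below converts the pfn in a pteval into an
 * mfn via pte_pfn_to_mfn() before handing it to native_make_pte(), and
 * xen_pte_val() converts the mfn back with pte_mfn_to_pfn() when reading
 * the value out.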
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
41 #include <linux/sched/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/export.h>
47 #include <linux/init.h>
48 #include <linux/gfp.h>
49 #include <linux/memblock.h>
50 #include <linux/seq_file.h>
51 #include <linux/crash_dump.h>
52 #ifdef CONFIG_KEXEC_CORE
53 #include <linux/kexec.h>
56 #include <trace/events/xen.h>
58 #include <asm/pgtable.h>
59 #include <asm/tlbflush.h>
60 #include <asm/fixmap.h>
61 #include <asm/mmu_context.h>
62 #include <asm/setup.h>
63 #include <asm/paravirt.h>
64 #include <asm/e820/api.h>
65 #include <asm/linkage.h>
71 #include <asm/xen/hypercall.h>
72 #include <asm/xen/hypervisor.h>
76 #include <xen/interface/xen.h>
77 #include <xen/interface/hvm/hvm_op.h>
78 #include <xen/interface/version.h>
79 #include <xen/interface/memory.h>
80 #include <xen/hvc-console.h>
82 #include "multicalls.h"
88 * Identity map, in addition to plain kernel map. This needs to be
89 * large enough to allocate the page table pages needed to map the rest.
90 * Each page can map 2MB.
92 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
93 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
96 /* l3 pud for userspace vsyscall mapping */
97 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
98 #endif /* CONFIG_X86_64 */
101 * Note about cr3 (pagetable base) values:
103 * xen_cr3 contains the current logical cr3 value; it contains the
104 * last set cr3. This may not be the current effective cr3, because
105 * its update may be being lazily deferred. However, a vcpu looking
106 * at its own cr3 can use this value knowing that everything will
107 * be self-consistent.
109 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
110 * hypercall to set the vcpu cr3 is complete (so it may be a little
111 * out of date, but it will never be set early). If one vcpu is
112 * looking at another vcpu's cr3 value, it should use this variable.
114 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
115 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
117 static phys_addr_t xen_pt_base, xen_pt_size __initdata;
120 * Just beyond the highest usermode address. STACK_TOP_MAX has a
121 * redzone above it, so round it up to a PGD boundary.
123 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
125 void make_lowmem_page_readonly(void *vaddr)
128 unsigned long address = (unsigned long)vaddr;
131 pte = lookup_address(address, &level);
133 return; /* vaddr missing */
135 ptev = pte_wrprotect(*pte);
137 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
141 void make_lowmem_page_readwrite(void *vaddr)
144 unsigned long address = (unsigned long)vaddr;
147 pte = lookup_address(address, &level);
149 return; /* vaddr missing */
151 ptev = pte_mkwrite(*pte);
153 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
158 static bool xen_page_pinned(void *ptr)
160 struct page *page = virt_to_page(ptr);
162 return PagePinned(page);
165 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
167 struct multicall_space mcs;
168 struct mmu_update *u;
170 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
172 mcs = xen_mc_entry(sizeof(*u));
175 /* ptep might be kmapped when using 32-bit HIGHPTE */
176 u->ptr = virt_to_machine(ptep).maddr;
177 u->val = pte_val_ma(pteval);
179 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
181 xen_mc_issue(PARAVIRT_LAZY_MMU);
183 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
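/*
 * Append an mmu_update request to the pending multicall batch: extend the
 * argument list of the previous mmu_update hypercall when possible,
 * otherwise queue a fresh single-request multicall.
 */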
185 static void xen_extend_mmu_update(const struct mmu_update *update)
187 struct multicall_space mcs;
188 struct mmu_update *u;
190 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
192 if (mcs.mc != NULL) {
195 mcs = __xen_mc_entry(sizeof(*u));
196 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
203 static void xen_extend_mmuext_op(const struct mmuext_op *op)
205 struct multicall_space mcs;
208 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
210 if (mcs.mc != NULL) {
213 mcs = __xen_mc_entry(sizeof(*u));
214 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
221 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
229 /* ptr may be ioremapped for 64-bit pagetable setup */
230 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
231 u.val = pmd_val_ma(val);
232 xen_extend_mmu_update(&u);
234 xen_mc_issue(PARAVIRT_LAZY_MMU);
239 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
241 trace_xen_mmu_set_pmd(ptr, val);
243 /* If page is not pinned, we can just update the entry
245 if (!xen_page_pinned(ptr)) {
250 xen_set_pmd_hyper(ptr, val);
254 * Associate a virtual page frame with a given physical page frame
255 * and protection flags for that frame.
257 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
259 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
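/*
 * Try to queue a pte update as part of the current lazy-MMU multicall
 * batch. When we are not in lazy MMU mode nothing is queued and the
 * caller (see __xen_set_pte() below) falls back to a direct hypercall.
 */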
262 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
266 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
271 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
272 u.val = pte_val_ma(pteval);
273 xen_extend_mmu_update(&u);
275 xen_mc_issue(PARAVIRT_LAZY_MMU);
280 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
282 if (!xen_batched_set_pte(ptep, pteval)) {
284 * Could call native_set_pte() here and trap and
285 * emulate the PTE write but with 32-bit guests this
286 * needs two traps (one for each of the two 32-bit
287 * words in the PTE) so do one hypercall directly
292 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
293 u.val = pte_val_ma(pteval);
294 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
298 static void xen_set_pte(pte_t *ptep, pte_t pteval)
300 trace_xen_mmu_set_pte(ptep, pteval);
301 __xen_set_pte(ptep, pteval);
304 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
305 pte_t *ptep, pte_t pteval)
307 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
308 __xen_set_pte(ptep, pteval);
311 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
312 unsigned long addr, pte_t *ptep)
314 /* Just return the pte as-is. We preserve the bits on commit */
315 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
319 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
320 pte_t *ptep, pte_t pte)
324 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
327 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
328 u.val = pte_val_ma(pte);
329 xen_extend_mmu_update(&u);
331 xen_mc_issue(PARAVIRT_LAZY_MMU);
334 /* Assume pteval_t is equivalent to all the other *val_t types. */
335 static pteval_t pte_mfn_to_pfn(pteval_t val)
337 if (val & _PAGE_PRESENT) {
338 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
339 unsigned long pfn = mfn_to_pfn(mfn);
341 pteval_t flags = val & PTE_FLAGS_MASK;
342 if (unlikely(pfn == ~0))
343 val = flags & ~_PAGE_PRESENT;
345 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
351 static pteval_t pte_pfn_to_mfn(pteval_t val)
353 if (val & _PAGE_PRESENT) {
354 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
355 pteval_t flags = val & PTE_FLAGS_MASK;
358 mfn = __pfn_to_mfn(pfn);
361 * If there's no mfn for the pfn, then just create an
362 * empty non-present pte. Unfortunately this loses
363 * information about the original pfn, so
364 * pte_mfn_to_pfn is asymmetric.
366 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
370 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
371 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
377 __visible pteval_t xen_pte_val(pte_t pte)
379 pteval_t pteval = pte.pte;
381 return pte_mfn_to_pfn(pteval);
383 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
385 __visible pgdval_t xen_pgd_val(pgd_t pgd)
387 return pte_mfn_to_pfn(pgd.pgd);
389 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
391 __visible pte_t xen_make_pte(pteval_t pte)
393 pte = pte_pfn_to_mfn(pte);
395 return native_make_pte(pte);
397 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
399 __visible pgd_t xen_make_pgd(pgdval_t pgd)
401 pgd = pte_pfn_to_mfn(pgd);
402 return native_make_pgd(pgd);
404 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
406 __visible pmdval_t xen_pmd_val(pmd_t pmd)
408 return pte_mfn_to_pfn(pmd.pmd);
410 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
412 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
420 /* ptr may be ioremapped for 64-bit pagetable setup */
421 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
422 u.val = pud_val_ma(val);
423 xen_extend_mmu_update(&u);
425 xen_mc_issue(PARAVIRT_LAZY_MMU);
430 static void xen_set_pud(pud_t *ptr, pud_t val)
432 trace_xen_mmu_set_pud(ptr, val);
434 /* If page is not pinned, we can just update the entry
436 if (!xen_page_pinned(ptr)) {
441 xen_set_pud_hyper(ptr, val);
444 #ifdef CONFIG_X86_PAE
445 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
447 trace_xen_mmu_set_pte_atomic(ptep, pte);
448 set_64bit((u64 *)ptep, native_pte_val(pte));
451 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
453 trace_xen_mmu_pte_clear(mm, addr, ptep);
454 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
455 native_pte_clear(mm, addr, ptep);
458 static void xen_pmd_clear(pmd_t *pmdp)
460 trace_xen_mmu_pmd_clear(pmdp);
461 set_pmd(pmdp, __pmd(0));
463 #endif /* CONFIG_X86_PAE */
465 __visible pmd_t xen_make_pmd(pmdval_t pmd)
467 pmd = pte_pfn_to_mfn(pmd);
468 return native_make_pmd(pmd);
470 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
472 #if CONFIG_PGTABLE_LEVELS == 4
473 __visible pudval_t xen_pud_val(pud_t pud)
475 return pte_mfn_to_pfn(pud.pud);
477 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
479 __visible pud_t xen_make_pud(pudval_t pud)
481 pud = pte_pfn_to_mfn(pud);
483 return native_make_pud(pud);
485 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
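/*
 * Return the user-mode pgd entry paired with a kernel pgd entry, if any.
 * xen_pgd_alloc() below stashes the user pgd in the kernel pgd page's
 * ->private; entries at or above USER_LIMIT have no user-mode counterpart.
 */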
487 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
489 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
490 unsigned offset = pgd - pgd_page;
491 pgd_t *user_ptr = NULL;
493 if (offset < pgd_index(USER_LIMIT)) {
494 struct page *page = virt_to_page(pgd_page);
495 user_ptr = (pgd_t *)page->private;
503 static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
507 u.ptr = virt_to_machine(ptr).maddr;
508 u.val = p4d_val_ma(val);
509 xen_extend_mmu_update(&u);
513 * Raw hypercall-based set_p4d, intended for use in early boot before
514 * there's a page structure. This implies:
515 * 1. The only existing pagetable is the kernel's
516 * 2. It is always pinned
517 * 3. It has no user pagetable attached to it
519 static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
525 __xen_set_p4d_hyper(ptr, val);
527 xen_mc_issue(PARAVIRT_LAZY_MMU);
532 static void xen_set_p4d(p4d_t *ptr, p4d_t val)
534 pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
537 trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
539 /* If page is not pinned, we can just update the entry
541 if (!xen_page_pinned(ptr)) {
544 WARN_ON(xen_page_pinned(user_ptr));
545 pgd_val.pgd = p4d_val_ma(val);
551 /* If it's pinned, then we can at least batch the kernel and
552 user updates together. */
555 __xen_set_p4d_hyper(ptr, val);
557 __xen_set_p4d_hyper((p4d_t *)user_ptr, val);
559 xen_mc_issue(PARAVIRT_LAZY_MMU);
561 #endif /* CONFIG_PGTABLE_LEVELS == 4 */
563 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
564 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
565 bool last, unsigned long limit)
567 int i, nr, flush = 0;
569 nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
570 for (i = 0; i < nr; i++) {
571 if (!pmd_none(pmd[i]))
572 flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
577 static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
578 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
579 bool last, unsigned long limit)
581 int i, nr, flush = 0;
583 nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
584 for (i = 0; i < nr; i++) {
587 if (pud_none(pud[i]))
590 pmd = pmd_offset(&pud[i], 0);
591 if (PTRS_PER_PMD > 1)
592 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
593 flush |= xen_pmd_walk(mm, pmd, func,
594 last && i == nr - 1, limit);
599 static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
600 int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
601 bool last, unsigned long limit)
603 int i, nr, flush = 0;
605 nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
606 for (i = 0; i < nr; i++) {
609 if (p4d_none(p4d[i]))
612 pud = pud_offset(&p4d[i], 0);
613 if (PTRS_PER_PUD > 1)
614 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
615 flush |= xen_pud_walk(mm, pud, func,
616 last && i == nr - 1, limit);
622 * (Yet another) pagetable walker. This one is intended for pinning a
623 * pagetable. This means that it walks a pagetable and calls the
624 * callback function on each page it finds making up the page table,
625 * at every level. It walks the entire pagetable, but it only bothers
626 * pinning pte pages which are below limit. In the normal case this
627 * will be STACK_TOP_MAX, but at boot we need to pin up to
630 * For 32-bit the important bit is that we don't pin beyond there,
631 * because then we start getting into Xen's ptes.
633 * For 64-bit, we must skip the Xen hole in the middle of the address
634 * space, just after the big x86-64 virtual hole.
636 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
637 int (*func)(struct mm_struct *mm, struct page *,
641 int i, nr, flush = 0;
642 unsigned hole_low, hole_high;
644 /* The limit is the last byte to be touched */
646 BUG_ON(limit >= FIXADDR_TOP);
649 * 64-bit has a great big hole in the middle of the address
650 * space, which contains the Xen mappings. On 32-bit these
651 * will end up making a zero-sized hole, so this is a no-op.
653 hole_low = pgd_index(USER_LIMIT);
654 hole_high = pgd_index(PAGE_OFFSET);
656 nr = pgd_index(limit) + 1;
657 for (i = 0; i < nr; i++) {
660 if (i >= hole_low && i < hole_high)
663 if (pgd_none(pgd[i]))
666 p4d = p4d_offset(&pgd[i], 0);
667 if (PTRS_PER_P4D > 1)
668 flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
669 flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
672 /* Do the top level last, so that the callbacks can use it as
673 a cue to do final things like tlb flushes. */
674 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
679 static int xen_pgd_walk(struct mm_struct *mm,
680 int (*func)(struct mm_struct *mm, struct page *,
684 return __xen_pgd_walk(mm, mm->pgd, func, limit);
687 /* If we're using split pte locks, then take the page's lock and
688 return a pointer to it. Otherwise return NULL. */
689 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
691 spinlock_t *ptl = NULL;
693 #if USE_SPLIT_PTE_PTLOCKS
694 ptl = ptlock_ptr(page);
695 spin_lock_nest_lock(ptl, &mm->page_table_lock);
701 static void xen_pte_unlock(void *v)
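/* Queue a pin/unpin mmuext op for the frame at @pfn onto the current batch. */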
707 static void xen_do_pin(unsigned level, unsigned long pfn)
712 op.arg1.mfn = pfn_to_mfn(pfn);
714 xen_extend_mmuext_op(&op);
717 static int xen_pin_page(struct mm_struct *mm, struct page *page,
720 unsigned pgfl = TestSetPagePinned(page);
724 flush = 0; /* already pinned */
725 else if (PageHighMem(page))
726 /* kmaps need flushing if we found an unpinned
730 void *pt = lowmem_page_address(page);
731 unsigned long pfn = page_to_pfn(page);
732 struct multicall_space mcs = __xen_mc_entry(0);
738 * We need to hold the pagetable lock between the time
739 * we make the pagetable RO and when we actually pin
740 * it. If we don't, then other users may come in and
741 * attempt to update the pagetable by writing it,
742 * which will fail because the memory is RO but not
743 * pinned, so Xen won't do the trap'n'emulate.
745 * If we're using split pte locks, we can't hold the
746 * entire pagetable's worth of locks during the
747 * traverse, because we may wrap the preempt count (8
748 * bits). The solution is to mark RO and pin each PTE
749 * page while holding the lock. This means the number
750 * of locks we end up holding is never more than a
751 * batch size (~32 entries, at present).
753 * If we're not using split pte locks, we needn't pin
754 * the PTE pages independently, because we're
755 * protected by the overall pagetable lock.
759 ptl = xen_pte_lock(page, mm);
761 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
762 pfn_pte(pfn, PAGE_KERNEL_RO),
763 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
766 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
768 /* Queue a deferred unlock for when this batch
770 xen_mc_callback(xen_pte_unlock, ptl);
777 /* This is called just after a mm has been created, but it has not
778 been used yet. We need to make sure that its pagetable is all
779 read-only, and can be pinned. */
780 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
782 trace_xen_mmu_pgd_pin(mm, pgd);
786 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
787 /* re-enable interrupts for flushing */
797 pgd_t *user_pgd = xen_get_user_pgd(pgd);
799 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
802 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
803 xen_do_pin(MMUEXT_PIN_L4_TABLE,
804 PFN_DOWN(__pa(user_pgd)));
807 #else /* CONFIG_X86_32 */
808 #ifdef CONFIG_X86_PAE
809 /* Need to make sure unshared kernel PMD is pinnable */
810 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
813 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
814 #endif /* CONFIG_X86_64 */
818 static void xen_pgd_pin(struct mm_struct *mm)
820 __xen_pgd_pin(mm, mm->pgd);
824 * On save, we need to pin all pagetables to make sure they get their
825 * mfns turned into pfns. Search the list for any unpinned pgds and pin
826 * them (unpinned pgds are not currently in use, probably because the
827 * process is under construction or destruction).
829 * Expected to be called in stop_machine() ("equivalent to taking
830 * every spinlock in the system"), so the locking doesn't really
831 * matter all that much.
833 void xen_mm_pin_all(void)
837 spin_lock(&pgd_lock);
839 list_for_each_entry(page, &pgd_list, lru) {
840 if (!PagePinned(page)) {
841 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
842 SetPageSavePinned(page);
846 spin_unlock(&pgd_lock);
850 * The init_mm pagetable is really pinned as soon as it's created, but
851 * that's before we have page structures to store the bits. So do all
852 * the book-keeping now.
854 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
861 static void __init xen_mark_init_mm_pinned(void)
863 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
866 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
869 unsigned pgfl = TestClearPagePinned(page);
871 if (pgfl && !PageHighMem(page)) {
872 void *pt = lowmem_page_address(page);
873 unsigned long pfn = page_to_pfn(page);
874 spinlock_t *ptl = NULL;
875 struct multicall_space mcs;
878 * Do the converse to pin_page. If we're using split
879 * pte locks, we must be holding the lock while
880 * the pte page is unpinned but still RO to prevent
881 * concurrent updates from seeing it in this
882 * partially-pinned state.
884 if (level == PT_PTE) {
885 ptl = xen_pte_lock(page, mm);
888 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
891 mcs = __xen_mc_entry(0);
893 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
894 pfn_pte(pfn, PAGE_KERNEL),
895 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
898 /* unlock when batch completed */
899 xen_mc_callback(xen_pte_unlock, ptl);
903 return 0; /* never need to flush on unpin */
906 /* Release a pagetable's pages back as normal RW */
907 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
909 trace_xen_mmu_pgd_unpin(mm, pgd);
913 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
917 pgd_t *user_pgd = xen_get_user_pgd(pgd);
920 xen_do_pin(MMUEXT_UNPIN_TABLE,
921 PFN_DOWN(__pa(user_pgd)));
922 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
927 #ifdef CONFIG_X86_PAE
928 /* Need to make sure unshared kernel PMD is unpinned */
929 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
933 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
938 static void xen_pgd_unpin(struct mm_struct *mm)
940 __xen_pgd_unpin(mm, mm->pgd);
944 * On resume, undo any pinning done at save, so that the rest of the
945 * kernel doesn't see any unexpected pinned pagetables.
947 void xen_mm_unpin_all(void)
951 spin_lock(&pgd_lock);
953 list_for_each_entry(page, &pgd_list, lru) {
954 if (PageSavePinned(page)) {
955 BUG_ON(!PagePinned(page));
956 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
957 ClearPageSavePinned(page);
961 spin_unlock(&pgd_lock);
964 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
966 spin_lock(&next->page_table_lock);
968 spin_unlock(&next->page_table_lock);
971 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
973 spin_lock(&mm->page_table_lock);
975 spin_unlock(&mm->page_table_lock);
978 static void drop_mm_ref_this_cpu(void *info)
980 struct mm_struct *mm = info;
982 if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
983 leave_mm(smp_processor_id());
986 * If this cpu still has a stale cr3 reference, then make sure
987 * it has been flushed.
989 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
995 * Another cpu may still have its %cr3 pointing at the pagetable, so
996 * we need to repoint it somewhere else before we can unpin it.
998 static void xen_drop_mm_ref(struct mm_struct *mm)
1003 drop_mm_ref_this_cpu(mm);
1005 /* Get the "official" set of cpus referring to our pagetable. */
1006 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1007 for_each_online_cpu(cpu) {
1008 if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1010 smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
1016 * It's possible that a vcpu may have a stale reference to our
1017 * cr3, because it's in lazy mode, and it hasn't yet flushed
1018 * its set of pending hypercalls. In this case, we can
1019 * look at its actual current cr3 value, and force it to flush
1022 cpumask_clear(mask);
1023 for_each_online_cpu(cpu) {
1024 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1025 cpumask_set_cpu(cpu, mask);
1028 smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
1029 free_cpumask_var(mask);
1032 static void xen_drop_mm_ref(struct mm_struct *mm)
1034 drop_mm_ref_this_cpu(mm);
1039 * While a process runs, Xen pins its pagetables, which means that the
1040 * hypervisor forces it to be read-only, and it controls all updates
1041 * to it. This means that all pagetable updates have to go via the
1042 * hypervisor, which is moderately expensive.
1044 * Since we're pulling the pagetable down, we switch to init_mm,
1045 * unpin the old process pagetable and mark it all read-write, which
1046 * allows further operations on it to be simple memory accesses.
1048 * The only subtle point is that another CPU may still be using the
1049 * pagetable because of lazy tlb flushing. This means we need to
1050 * switch all CPUs off this pagetable before we can unpin it.
1052 static void xen_exit_mmap(struct mm_struct *mm)
1054 get_cpu(); /* make sure we don't move around */
1055 xen_drop_mm_ref(mm);
1058 spin_lock(&mm->page_table_lock);
1060 /* pgd may not be pinned in the error exit path of execve */
1061 if (xen_page_pinned(mm->pgd))
1064 spin_unlock(&mm->page_table_lock);
1067 static void xen_post_allocator_init(void);
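/*
 * Pin or unpin a single pagetable frame with a direct mmuext hypercall,
 * for use at init time before we rely on multicall batching.
 */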
1069 static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1071 struct mmuext_op op;
1074 op.arg1.mfn = pfn_to_mfn(pfn);
1075 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1079 #ifdef CONFIG_X86_64
1080 static void __init xen_cleanhighmap(unsigned long vaddr,
1081 unsigned long vaddr_end)
1083 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1084 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1086 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1087 * We include the PMD passed in on _both_ boundaries. */
1088 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1089 pmd++, vaddr += PMD_SIZE) {
1092 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1093 set_pmd(pmd, __pmd(0));
1095 /* In case we did something silly, we should crash in this function
1096 * instead of somewhere later where it would be confusing. */
1101 * Make a page range writeable and free it.
1103 static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1105 void *vaddr = __va(paddr);
1106 void *vaddr_end = vaddr + size;
1108 for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1109 make_lowmem_page_readwrite(vaddr);
1111 memblock_free(paddr, size);
1114 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
1116 unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1119 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
1120 ClearPagePinned(virt_to_page(__va(pa)));
1121 xen_free_ro_pages(pa, PAGE_SIZE);
1124 static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
1130 if (pmd_large(*pmd)) {
1131 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1132 xen_free_ro_pages(pa, PMD_SIZE);
1136 pte_tbl = pte_offset_kernel(pmd, 0);
1137 for (i = 0; i < PTRS_PER_PTE; i++) {
1138 if (pte_none(pte_tbl[i]))
1140 pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
1141 xen_free_ro_pages(pa, PAGE_SIZE);
1143 set_pmd(pmd, __pmd(0));
1144 xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
1147 static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
1153 if (pud_large(*pud)) {
1154 pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1155 xen_free_ro_pages(pa, PUD_SIZE);
1159 pmd_tbl = pmd_offset(pud, 0);
1160 for (i = 0; i < PTRS_PER_PMD; i++) {
1161 if (pmd_none(pmd_tbl[i]))
1163 xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
1165 set_pud(pud, __pud(0));
1166 xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
1169 static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
1175 if (p4d_large(*p4d)) {
1176 pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
1177 xen_free_ro_pages(pa, P4D_SIZE);
1181 pud_tbl = pud_offset(p4d, 0);
1182 for (i = 0; i < PTRS_PER_PUD; i++) {
1183 if (pud_none(pud_tbl[i]))
1185 xen_cleanmfnmap_pud(pud_tbl + i, unpin);
1187 set_p4d(p4d, __p4d(0));
1188 xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
1192 * Since it is well isolated we can (and since it is perhaps large we should)
1193 * also free the page tables mapping the initial P->M table.
1195 static void __init xen_cleanmfnmap(unsigned long vaddr)
1202 unpin = (vaddr == 2 * PGDIR_SIZE);
1204 pgd = pgd_offset_k(vaddr);
1205 p4d = p4d_offset(pgd, 0);
1206 for (i = 0; i < PTRS_PER_P4D; i++) {
1207 if (p4d_none(p4d[i]))
1209 xen_cleanmfnmap_p4d(p4d + i, unpin);
1211 if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
1212 set_pgd(pgd, __pgd(0));
1213 xen_cleanmfnmap_free_pgtbl(p4d, unpin);
1217 static void __init xen_pagetable_p2m_free(void)
1222 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1224 /* No memory or already called. */
1225 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
1228 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1229 memset((void *)xen_start_info->mfn_list, 0xff, size);
1231 addr = xen_start_info->mfn_list;
1233 * We could be in __ka space.
1234 * We round up to the PMD, which means that if anybody at this stage is
1235 * using the __ka address of xen_start_info or
1236 * xen_start_info->shared_info they are going to crash. Fortunately
1237 * we have already revectored in xen_setup_kernel_pagetable and in
1238 * xen_setup_shared_info.
1240 size = roundup(size, PMD_SIZE);
1242 if (addr >= __START_KERNEL_map) {
1243 xen_cleanhighmap(addr, addr + size);
1244 size = PAGE_ALIGN(xen_start_info->nr_pages *
1245 sizeof(unsigned long));
1246 memblock_free(__pa(addr), size);
1248 xen_cleanmfnmap(addr);
1252 static void __init xen_pagetable_cleanhighmap(void)
1257 /* At this stage, cleanup_highmap has already cleaned __ka space
1258 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1259 * the ramdisk). We continue on, erasing PMD entries that point to page
1260 * tables - do note that they are accessible at this stage via __va.
1261 * For good measure we also round up to the PMD - which means that if
1262 * anybody is using a __ka address for the initial boot-stack and tries
1263 * to use it, they are going to crash. The xen_start_info has been
1264 * taken care of already in xen_setup_kernel_pagetable. */
1265 addr = xen_start_info->pt_base;
1266 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1268 xen_cleanhighmap(addr, addr + size);
1269 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1271 /* This is superfluous and not strictly necessary, but you know what,
1272 * let's do it. The MODULES_VADDR -> MODULES_END range should be clear of
1273 * anything at this stage. */
1274 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1279 static void __init xen_pagetable_p2m_setup(void)
1281 xen_vmalloc_p2m_tree();
1283 #ifdef CONFIG_X86_64
1284 xen_pagetable_p2m_free();
1286 xen_pagetable_cleanhighmap();
1288 /* And revector! Bye bye old array */
1289 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1292 static void __init xen_pagetable_init(void)
1295 xen_post_allocator_init();
1297 xen_pagetable_p2m_setup();
1299 /* Allocate and initialize top and mid mfn levels for p2m structure */
1300 xen_build_mfn_list_list();
1302 /* Remap memory freed due to conflicts with E820 map */
1305 xen_setup_shared_info();
1307 static void xen_write_cr2(unsigned long cr2)
1309 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1312 static unsigned long xen_read_cr2(void)
1314 return this_cpu_read(xen_vcpu)->arch.cr2;
1317 unsigned long xen_read_cr2_direct(void)
1319 return this_cpu_read(xen_vcpu_info.arch.cr2);
1322 static void xen_flush_tlb(void)
1324 struct mmuext_op *op;
1325 struct multicall_space mcs;
1327 trace_xen_mmu_flush_tlb(0);
1331 mcs = xen_mc_entry(sizeof(*op));
1334 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1335 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1337 xen_mc_issue(PARAVIRT_LAZY_MMU);
1342 static void xen_flush_tlb_single(unsigned long addr)
1344 struct mmuext_op *op;
1345 struct multicall_space mcs;
1347 trace_xen_mmu_flush_tlb_single(addr);
1351 mcs = xen_mc_entry(sizeof(*op));
1353 op->cmd = MMUEXT_INVLPG_LOCAL;
1354 op->arg1.linear_addr = addr & PAGE_MASK;
1355 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1357 xen_mc_issue(PARAVIRT_LAZY_MMU);
1362 static void xen_flush_tlb_others(const struct cpumask *cpus,
1363 const struct flush_tlb_info *info)
1366 struct mmuext_op op;
1368 DECLARE_BITMAP(mask, num_processors);
1370 DECLARE_BITMAP(mask, NR_CPUS);
1373 struct multicall_space mcs;
1375 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
1377 if (cpumask_empty(cpus))
1378 return; /* nothing to do */
1380 mcs = xen_mc_entry(sizeof(*args));
1382 args->op.arg2.vcpumask = to_cpumask(args->mask);
1384 /* Remove us, and any offline CPUs. */
1385 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1386 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1388 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1389 if (info->end != TLB_FLUSH_ALL &&
1390 (info->end - info->start) <= PAGE_SIZE) {
1391 args->op.cmd = MMUEXT_INVLPG_MULTI;
1392 args->op.arg1.linear_addr = info->start;
1395 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1397 xen_mc_issue(PARAVIRT_LAZY_MMU);
1400 static unsigned long xen_read_cr3(void)
1402 return this_cpu_read(xen_cr3);
1405 static void set_current_cr3(void *v)
1407 this_cpu_write(xen_current_cr3, (unsigned long)v);
1410 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1412 struct mmuext_op op;
1415 trace_xen_mmu_write_cr3(kernel, cr3);
1418 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1422 WARN_ON(mfn == 0 && kernel);
1424 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1427 xen_extend_mmuext_op(&op);
1430 this_cpu_write(xen_cr3, cr3);
1432 /* Update xen_current_cr3 once the batch has actually
1434 xen_mc_callback(set_current_cr3, (void *)cr3);
1437 static void xen_write_cr3(unsigned long cr3)
1439 BUG_ON(preemptible());
1441 xen_mc_batch(); /* disables interrupts */
1443 /* Update while interrupts are disabled, so it's atomic with
1445 this_cpu_write(xen_cr3, cr3);
1447 __xen_write_cr3(true, cr3);
1449 #ifdef CONFIG_X86_64
1451 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1453 __xen_write_cr3(false, __pa(user_pgd));
1455 __xen_write_cr3(false, 0);
1459 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1462 #ifdef CONFIG_X86_64
1464 * At the start of the day - when Xen launches a guest, it has already
1465 * built pagetables for the guest. We diligently look over them
1466 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1467 * init_top_pgt and its friends. Then when we are happy we load
1468 * the new init_top_pgt - and continue on.
1470 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1471 * up the rest of the pagetables. When it has completed it loads the cr3.
1472 * N.B. that baremetal would start at 'start_kernel' (and the early
1473 * #PF handler would create bootstrap pagetables) - so we are running
1474 * with the same assumptions as what to do when write_cr3 is executed
1477 * Since there are no user-page tables at all, we have two variants
1478 * of xen_write_cr3 - the early bootup (this one), and the late one
1479 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1480 * the Linux kernel and user-space are both in ring 3 while the
1481 * hypervisor is in ring 0.
1483 static void __init xen_write_cr3_init(unsigned long cr3)
1485 BUG_ON(preemptible());
1487 xen_mc_batch(); /* disables interrupts */
1489 /* Update while interrupts are disabled, so it's atomic with
1491 this_cpu_write(xen_cr3, cr3);
1493 __xen_write_cr3(true, cr3);
1495 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1499 static int xen_pgd_alloc(struct mm_struct *mm)
1501 pgd_t *pgd = mm->pgd;
1504 BUG_ON(PagePinned(virt_to_page(pgd)));
1506 #ifdef CONFIG_X86_64
1508 struct page *page = virt_to_page(pgd);
1511 BUG_ON(page->private != 0);
1515 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1516 page->private = (unsigned long)user_pgd;
1518 if (user_pgd != NULL) {
1519 #ifdef CONFIG_X86_VSYSCALL_EMULATION
1520 user_pgd[pgd_index(VSYSCALL_ADDR)] =
1521 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1526 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1532 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1534 #ifdef CONFIG_X86_64
1535 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1538 free_page((unsigned long)user_pgd);
1543 * Init-time set_pte while constructing initial pagetables, which
1544 * doesn't allow RO page table pages to be remapped RW.
1546 * If there is no MFN for this PFN then this page is initially
1547 * ballooned out so clear the PTE (as in decrease_reservation() in
1548 * drivers/xen/balloon.c).
1550 * Many of these PTE updates are done on unpinned and writable pages
1551 * and doing a hypercall for these is unnecessary and expensive. At
1552 * this point it is not possible to tell if a page is pinned or not,
1553 * so always write the PTE directly and rely on Xen trapping and
1554 * emulating any updates as necessary.
1556 __visible pte_t xen_make_pte_init(pteval_t pte)
1558 #ifdef CONFIG_X86_64
1562 * Pages belonging to the initial p2m list mapped outside the default
1563 * address range must be mapped read-only. This region contains the
1564 * page tables for mapping the p2m list, too, and page tables MUST be
1567 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1568 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1569 pfn >= xen_start_info->first_p2m_pfn &&
1570 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1573 pte = pte_pfn_to_mfn(pte);
1574 return native_make_pte(pte);
1576 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1578 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1580 #ifdef CONFIG_X86_32
1581 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1582 if (pte_mfn(pte) != INVALID_P2M_ENTRY
1583 && pte_val_ma(*ptep) & _PAGE_PRESENT)
1584 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1587 native_set_pte(ptep, pte);
1590 /* Early in boot, while setting up the initial pagetable, assume
1591 everything is pinned. */
1592 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1594 #ifdef CONFIG_FLATMEM
1595 BUG_ON(mem_map); /* should only be used early */
1597 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1598 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1601 /* Used for pmd and pud */
1602 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1604 #ifdef CONFIG_FLATMEM
1605 BUG_ON(mem_map); /* should only be used early */
1607 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1610 /* Early release_pte assumes that all pts are pinned, since there's
1611 only init_mm and anything attached to that is pinned. */
1612 static void __init xen_release_pte_init(unsigned long pfn)
1614 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1615 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1618 static void __init xen_release_pmd_init(unsigned long pfn)
1620 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
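/*
 * Batched helpers used by xen_alloc_ptpage()/xen_release_ptpage() below:
 * __pin_pagetable_pfn() queues a pin/unpin mmuext op and __set_pfn_prot()
 * queues a va-mapping update changing the page's protection.
 */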
1623 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1625 struct multicall_space mcs;
1626 struct mmuext_op *op;
1628 mcs = __xen_mc_entry(sizeof(*op));
1631 op->arg1.mfn = pfn_to_mfn(pfn);
1633 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1636 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1638 struct multicall_space mcs;
1639 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1641 mcs = __xen_mc_entry(0);
1642 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1643 pfn_pte(pfn, prot), 0);
1646 /* This needs to make sure the new pte page is pinned iff it's being
1647 attached to a pinned pagetable. */
1648 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1651 bool pinned = PagePinned(virt_to_page(mm->pgd));
1653 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1656 struct page *page = pfn_to_page(pfn);
1658 SetPagePinned(page);
1660 if (!PageHighMem(page)) {
1663 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1665 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1666 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1668 xen_mc_issue(PARAVIRT_LAZY_MMU);
1670 /* make sure there are no stray mappings of
1672 kmap_flush_unused();
1677 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1679 xen_alloc_ptpage(mm, pfn, PT_PTE);
1682 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1684 xen_alloc_ptpage(mm, pfn, PT_PMD);
1687 /* This should never happen until we're OK to use struct page */
1688 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1690 struct page *page = pfn_to_page(pfn);
1691 bool pinned = PagePinned(page);
1693 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1696 if (!PageHighMem(page)) {
1699 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1700 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1702 __set_pfn_prot(pfn, PAGE_KERNEL);
1704 xen_mc_issue(PARAVIRT_LAZY_MMU);
1706 ClearPagePinned(page);
1710 static void xen_release_pte(unsigned long pfn)
1712 xen_release_ptpage(pfn, PT_PTE);
1715 static void xen_release_pmd(unsigned long pfn)
1717 xen_release_ptpage(pfn, PT_PMD);
1720 #if CONFIG_PGTABLE_LEVELS >= 4
1721 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1723 xen_alloc_ptpage(mm, pfn, PT_PUD);
1726 static void xen_release_pud(unsigned long pfn)
1728 xen_release_ptpage(pfn, PT_PUD);
1732 void __init xen_reserve_top(void)
1734 #ifdef CONFIG_X86_32
1735 unsigned long top = HYPERVISOR_VIRT_START;
1736 struct xen_platform_parameters pp;
1738 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1739 top = pp.virt_start;
1741 reserve_top_address(-top);
1742 #endif /* CONFIG_X86_32 */
1746 * Like __va(), but returns address in the kernel mapping (which is
1747 * all we have until the physical memory mapping has been set up).
1749 static void * __init __ka(phys_addr_t paddr)
1751 #ifdef CONFIG_X86_64
1752 return (void *)(paddr + __START_KERNEL_map);
1758 /* Convert a machine address to physical address */
1759 static unsigned long __init m2p(phys_addr_t maddr)
1763 maddr &= PTE_PFN_MASK;
1764 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1769 /* Convert a machine address to kernel virtual */
1770 static void * __init m2v(phys_addr_t maddr)
1772 return __ka(m2p(maddr));
1775 /* Set the page permissions on identity-mapped pages */
1776 static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1777 unsigned long flags)
1779 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1780 pte_t pte = pfn_pte(pfn, prot);
1782 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1785 static void __init set_page_prot(void *addr, pgprot_t prot)
1787 return set_page_prot_flags(addr, prot, UVMF_NONE);
1789 #ifdef CONFIG_X86_32
1790 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1792 unsigned pmdidx, pteidx;
1796 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1801 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1804 /* Reuse or allocate a page of ptes */
1805 if (pmd_present(pmd[pmdidx]))
1806 pte_page = m2v(pmd[pmdidx].pmd);
1808 /* Check for free pte pages */
1809 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1812 pte_page = &level1_ident_pgt[ident_pte];
1813 ident_pte += PTRS_PER_PTE;
1815 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1818 /* Install mappings */
1819 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1822 if (pfn > max_pfn_mapped)
1823 max_pfn_mapped = pfn;
1825 if (!pte_none(pte_page[pteidx]))
1828 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1829 pte_page[pteidx] = pte;
1833 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1834 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1836 set_page_prot(pmd, PAGE_KERNEL_RO);
1839 void __init xen_setup_machphys_mapping(void)
1841 struct xen_machphys_mapping mapping;
1843 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1844 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1845 machine_to_phys_nr = mapping.max_mfn + 1;
1847 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1849 #ifdef CONFIG_X86_32
1850 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1851 < machine_to_phys_mapping);
1855 #ifdef CONFIG_X86_64
1856 static void __init convert_pfn_mfn(void *v)
1861 /* All levels are converted the same way, so just treat them
1863 for (i = 0; i < PTRS_PER_PTE; i++)
1864 pte[i] = xen_make_pte(pte[i].pte);
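/*
 * If @addr is the first or last of the pagetable frames Xen handed us,
 * make the page writable again and clear it so the reserved range can be
 * trimmed and the frame reused.
 */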
1866 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1869 if (*pt_base == PFN_DOWN(__pa(addr))) {
1870 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1871 clear_page((void *)addr);
1874 if (*pt_end == PFN_DOWN(__pa(addr))) {
1875 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1876 clear_page((void *)addr);
1881 * Set up the initial kernel pagetable.
1883 * We can construct this by grafting the Xen provided pagetable into
1884 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1885 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1886 * kernel has a physical mapping to start with - but that's enough to
1887 * get __va working. We need to fill in the rest of the physical
1888 * mapping once some sort of allocator has been set up.
1890 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1894 unsigned long addr[3];
1895 unsigned long pt_base, pt_end;
1898 /* max_pfn_mapped is the last pfn mapped in the initial memory
1899 * mappings. Considering that on Xen after the kernel mappings we
1900 * have the mappings of some pages that don't exist in pfn space, we
1901 * set max_pfn_mapped to the last real pfn mapped. */
1902 if (xen_start_info->mfn_list < __START_KERNEL_map)
1903 max_pfn_mapped = xen_start_info->first_p2m_pfn;
1905 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1907 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1908 pt_end = pt_base + xen_start_info->nr_pt_frames;
1910 /* Zap identity mapping */
1911 init_top_pgt[0] = __pgd(0);
1913 /* Pre-constructed entries are in pfn, so convert to mfn */
1914 /* L4[272] -> level3_ident_pgt */
1915 /* L4[511] -> level3_kernel_pgt */
1916 convert_pfn_mfn(init_top_pgt);
1918 /* L3_i[0] -> level2_ident_pgt */
1919 convert_pfn_mfn(level3_ident_pgt);
1920 /* L3_k[510] -> level2_kernel_pgt */
1921 /* L3_k[511] -> level2_fixmap_pgt */
1922 convert_pfn_mfn(level3_kernel_pgt);
1924 /* L3_k[511][506] -> level1_fixmap_pgt */
1925 convert_pfn_mfn(level2_fixmap_pgt);
1927 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
1928 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1929 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1931 addr[0] = (unsigned long)pgd;
1932 addr[1] = (unsigned long)l3;
1933 addr[2] = (unsigned long)l2;
1934 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1935 * Both L4[272][0] and L4[511][510] have entries that point to the same
1936 * L2 (PMD) tables. Meaning that if you modify it in __va space
1937 * it will also be modified in the __ka space! (But if you just
1938 * modify the PMD table to point to other PTE's or none, then you
1939 * are OK - which is what cleanup_highmap does) */
1940 copy_page(level2_ident_pgt, l2);
1941 /* Graft it onto L4[511][510] */
1942 copy_page(level2_kernel_pgt, l2);
1944 /* Copy the initial P->M table mappings if necessary. */
1945 i = pgd_index(xen_start_info->mfn_list);
1946 if (i && i < pgd_index(__START_KERNEL_map))
1947 init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1949 /* Make pagetable pieces RO */
1950 set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
1951 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1952 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1953 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1954 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1955 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1956 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1957 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
1959 /* Pin down new L4 */
1960 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1961 PFN_DOWN(__pa_symbol(init_top_pgt)));
1963 /* Unpin Xen-provided one */
1964 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1967 * At this stage there can be no user pgd, and no page structure to
1968 * attach it to, so make sure we just set the kernel pgd.
1971 __xen_write_cr3(true, __pa(init_top_pgt));
1972 xen_mc_issue(PARAVIRT_LAZY_CPU);
1974 /* We can't easily rip out L3 and L2, as the Xen pagetables are
1975 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1976 * the initial domain. For guests using the toolstack, they are in:
1977 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1978 * rip out the [L4] (pgd), but for guests we shave off three pages.
1980 for (i = 0; i < ARRAY_SIZE(addr); i++)
1981 check_pt_base(&pt_base, &pt_end, addr[i]);
1983 /* The Xen pagetable we are still using, now smaller by up to three pages */
1984 xen_pt_base = PFN_PHYS(pt_base);
1985 xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
1986 memblock_reserve(xen_pt_base, xen_pt_size);
1988 /* Revector the xen_start_info */
1989 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1993 * Read a value from a physical address.
1995 static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
1997 unsigned long *vaddr;
2000 vaddr = early_memremap_ro(addr, sizeof(val));
2002 early_memunmap(vaddr, sizeof(val));
2007 * Translate a virtual address to a physical one without relying on mapped
2008 * page tables. Don't rely on big pages being aligned in (guest) physical
2011 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
2020 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
2022 if (!pgd_present(pgd))
2025 pa = pgd_val(pgd) & PTE_PFN_MASK;
2026 pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
2028 if (!pud_present(pud))
2030 pa = pud_val(pud) & PTE_PFN_MASK;
2032 return pa + (vaddr & ~PUD_MASK);
2034 pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
2036 if (!pmd_present(pmd))
2038 pa = pmd_val(pmd) & PTE_PFN_MASK;
2040 return pa + (vaddr & ~PMD_MASK);
2042 pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
2044 if (!pte_present(pte))
2046 pa = pte_pfn(pte) << PAGE_SHIFT;
2048 return pa | (vaddr & ~PAGE_MASK);
2052 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
2055 void __init xen_relocate_p2m(void)
2057 phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
2058 unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
2059 int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
2065 unsigned long *new_p2m;
2068 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2069 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
2070 n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
2071 n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
2072 n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
2073 if (PTRS_PER_P4D > 1)
2074 n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
2077 n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
2079 new_area = xen_find_free_area(PFN_PHYS(n_frames));
2081 xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2086 * Set up the page tables for addressing the new p2m list.
2087 * We have asked the hypervisor to map the p2m list at the user address
2088 * PUD_SIZE. It may have done so, or it may have used a kernel space
2089 * address depending on the Xen version.
2090 * To avoid any possible virtual address collision, just use
2091 * 2 * PUD_SIZE for the new area.
2093 p4d_phys = new_area;
2094 pud_phys = p4d_phys + PFN_PHYS(n_p4d);
2095 pmd_phys = pud_phys + PFN_PHYS(n_pud);
2096 pt_phys = pmd_phys + PFN_PHYS(n_pmd);
2097 p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
2099 pgd = __va(read_cr3_pa());
2100 new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
2105 p4d = early_memremap(p4d_phys, PAGE_SIZE);
2107 n_pud = min(save_pud, PTRS_PER_P4D);
2109 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2110 pud = early_memremap(pud_phys, PAGE_SIZE);
2112 for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
2114 pmd = early_memremap(pmd_phys, PAGE_SIZE);
2116 for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
2118 pt = early_memremap(pt_phys, PAGE_SIZE);
2121 idx_pte < min(n_pte, PTRS_PER_PTE);
2123 set_pte(pt + idx_pte,
2124 pfn_pte(p2m_pfn, PAGE_KERNEL));
2127 n_pte -= PTRS_PER_PTE;
2128 early_memunmap(pt, PAGE_SIZE);
2129 make_lowmem_page_readonly(__va(pt_phys));
2130 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2132 set_pmd(pmd + idx_pt,
2133 __pmd(_PAGE_TABLE | pt_phys));
2134 pt_phys += PAGE_SIZE;
2136 n_pt -= PTRS_PER_PMD;
2137 early_memunmap(pmd, PAGE_SIZE);
2138 make_lowmem_page_readonly(__va(pmd_phys));
2139 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2140 PFN_DOWN(pmd_phys));
2141 set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2142 pmd_phys += PAGE_SIZE;
2144 n_pmd -= PTRS_PER_PUD;
2145 early_memunmap(pud, PAGE_SIZE);
2146 make_lowmem_page_readonly(__va(pud_phys));
2147 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
2149 set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
2151 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
2152 pud_phys += PAGE_SIZE;
2155 save_pud -= PTRS_PER_P4D;
2156 early_memunmap(p4d, PAGE_SIZE);
2157 make_lowmem_page_readonly(__va(p4d_phys));
2158 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
2159 set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
2160 p4d_phys += PAGE_SIZE;
2162 } while (++idx_p4d < n_p4d);
2164 /* Now copy the old p2m info to the new area. */
2165 memcpy(new_p2m, xen_p2m_addr, size);
2166 xen_p2m_addr = new_p2m;
2168 /* Release the old p2m list and set new list info. */
2169 p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
2171 p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
2173 if (xen_start_info->mfn_list < __START_KERNEL_map) {
2174 pfn = xen_start_info->first_p2m_pfn;
2175 pfn_end = xen_start_info->first_p2m_pfn +
2176 xen_start_info->nr_p2m_frames;
2177 set_pgd(pgd + 1, __pgd(0));
2180 pfn_end = p2m_pfn_end;
2183 memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
2184 while (pfn < pfn_end) {
2185 if (pfn == p2m_pfn) {
2189 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
2193 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2194 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2195 xen_start_info->nr_p2m_frames = n_frames;
2198 #else /* !CONFIG_X86_64 */
2199 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2200 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2202 static void __init xen_write_cr3_init(unsigned long cr3)
2204 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2206 BUG_ON(read_cr3_pa() != __pa(initial_page_table));
2207 BUG_ON(cr3 != __pa(swapper_pg_dir));
2210 * We are switching to swapper_pg_dir for the first time (from
2211 * initial_page_table) and therefore need to mark that page
2212 * read-only and then pin it.
2214 * Xen disallows sharing of kernel PMDs for PAE
2215 * guests. Therefore we must copy the kernel PMD from
2216 * initial_page_table into a new kernel PMD to be used in
2219 swapper_kernel_pmd =
2220 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2221 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
2222 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2223 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2224 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2226 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2228 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2230 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2231 PFN_DOWN(__pa(initial_page_table)));
2232 set_page_prot(initial_page_table, PAGE_KERNEL);
2233 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2235 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2239 * For 32-bit domains xen_start_info->pt_base is the pgd address, which might not
2240 * be the first page table in the page table pool.
2241 * Iterate through the initial page tables to find the real page table base.
2243 static phys_addr_t xen_find_pt_base(pmd_t *pmd)
2245 phys_addr_t pt_base, paddr;
2248 pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2250 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2251 if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2252 paddr = m2p(pmd[pmdidx].pmd);
2253 pt_base = min(pt_base, paddr);
2259 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2263 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2265 xen_pt_base = xen_find_pt_base(kernel_pmd);
2266 xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2268 initial_kernel_pmd =
2269 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2271 max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
2273 copy_page(initial_kernel_pmd, kernel_pmd);
2275 xen_map_identity_early(initial_kernel_pmd, max_pfn);
2277 copy_page(initial_page_table, pgd);
2278 initial_page_table[KERNEL_PGD_BOUNDARY] =
2279 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2281 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2282 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2283 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2285 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2287 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2288 PFN_DOWN(__pa(initial_page_table)));
2289 xen_write_cr3(__pa(initial_page_table));
2291 memblock_reserve(xen_pt_base, xen_pt_size);
2292 }
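/*
 * Sketch of what each set_page_prot() call above amounts to on the PV path
 * (matching the update_va_mapping-based helper earlier in this file; shown
 * only for illustration):
 *
 *	pte_t pte = pfn_pte(virt_to_pfn(addr), PAGE_KERNEL_RO);
 *
 *	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, UVMF_NONE))
 *		BUG();
 */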
2293 #endif /* CONFIG_X86_64 */
2295 void __init xen_reserve_special_pages(void)
2299 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2300 if (xen_start_info->store_mfn) {
2301 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2302 memblock_reserve(paddr, PAGE_SIZE);
2303 }
2304 if (!xen_initial_domain()) {
2305 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
2306 memblock_reserve(paddr, PAGE_SIZE);
2310 void __init xen_pt_check_e820(void)
2312 if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
2313 xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2314 BUG();
2315 }
2316 }
2318 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2320 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2321 {
2322 pte_t pte;
2324 phys >>= PAGE_SHIFT;
2326 switch (idx) {
2327 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2329 #ifdef CONFIG_X86_32
2331 # ifdef CONFIG_HIGHMEM
2332 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2333 # endif
2334 #elif defined(CONFIG_X86_VSYSCALL_EMULATION)
2335 case VSYSCALL_PAGE:
2336 #endif
2337 case FIX_TEXT_POKE0:
2338 case FIX_TEXT_POKE1:
2339 case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
2340 /* All local page mappings */
2341 pte = pfn_pte(phys, prot);
2342 break;
2344 #ifdef CONFIG_X86_LOCAL_APIC
2345 case FIX_APIC_BASE: /* maps dummy local APIC */
2346 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2347 break;
2348 #endif
2350 #ifdef CONFIG_X86_IO_APIC
2351 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2353 * We just don't map the IO APIC - all access is via
2354 * hypercalls. Keep the address in the pte for reference.
2356 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2357 break;
2358 #endif
2360 case FIX_PARAVIRT_BOOTMAP:
2361 /* This is an MFN, but it isn't an IO mapping from the
2362    IO domain */
2363 pte = mfn_pte(phys, prot);
2364 break;
2366 default:
2367 /* By default, set_fixmap is used for hardware mappings */
2368 pte = mfn_pte(phys, prot);
2369 break;
2370 }
2372 __native_set_fixmap(idx, pte);
2374 #ifdef CONFIG_X86_VSYSCALL_EMULATION
2375 /* Replicate changes to map the vsyscall page into the user
2376 pagetable vsyscall mapping. */
2377 if (idx == VSYSCALL_PAGE) {
2378 unsigned long vaddr = __fix_to_virt(idx);
2379 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2384 static void __init xen_post_allocator_init(void)
2386 pv_mmu_ops.set_pte = xen_set_pte;
2387 pv_mmu_ops.set_pmd = xen_set_pmd;
2388 pv_mmu_ops.set_pud = xen_set_pud;
2389 #if CONFIG_PGTABLE_LEVELS >= 4
2390 pv_mmu_ops.set_p4d = xen_set_p4d;
2391 #endif
2393 /* This will work as long as patching hasn't happened yet
2394 (which it hasn't) */
2395 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2396 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2397 pv_mmu_ops.release_pte = xen_release_pte;
2398 pv_mmu_ops.release_pmd = xen_release_pmd;
2399 #if CONFIG_PGTABLE_LEVELS >= 4
2400 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2401 pv_mmu_ops.release_pud = xen_release_pud;
2402 #endif
2403 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2405 #ifdef CONFIG_X86_64
2406 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2407 SetPagePinned(virt_to_page(level3_user_vsyscall));
2408 #endif
2409 xen_mark_init_mm_pinned();
2410 }
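/*
 * The rebinding above replaces the boot-time *_init callbacks installed in
 * xen_mmu_ops below: once the normal page allocator is up, new page table
 * pages come from it and are made read-only and (un)pinned as they are
 * allocated and released, which is what the non-init xen_alloc_*(),
 * xen_release_*() and xen_set_*() variants take care of.
 */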
2412 static void xen_leave_lazy_mmu(void)
2413 {
2414 preempt_disable();
2415 xen_mc_flush();
2416 paravirt_leave_lazy_mmu();
2417 preempt_enable();
2418 }
2420 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2421 .read_cr2 = xen_read_cr2,
2422 .write_cr2 = xen_write_cr2,
2424 .read_cr3 = xen_read_cr3,
2425 .write_cr3 = xen_write_cr3_init,
2427 .flush_tlb_user = xen_flush_tlb,
2428 .flush_tlb_kernel = xen_flush_tlb,
2429 .flush_tlb_single = xen_flush_tlb_single,
2430 .flush_tlb_others = xen_flush_tlb_others,
2432 .pte_update = paravirt_nop,
2434 .pgd_alloc = xen_pgd_alloc,
2435 .pgd_free = xen_pgd_free,
2437 .alloc_pte = xen_alloc_pte_init,
2438 .release_pte = xen_release_pte_init,
2439 .alloc_pmd = xen_alloc_pmd_init,
2440 .release_pmd = xen_release_pmd_init,
2442 .set_pte = xen_set_pte_init,
2443 .set_pte_at = xen_set_pte_at,
2444 .set_pmd = xen_set_pmd_hyper,
2446 .ptep_modify_prot_start = __ptep_modify_prot_start,
2447 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2449 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2450 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2452 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2453 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2455 #ifdef CONFIG_X86_PAE
2456 .set_pte_atomic = xen_set_pte_atomic,
2457 .pte_clear = xen_pte_clear,
2458 .pmd_clear = xen_pmd_clear,
2459 #endif /* CONFIG_X86_PAE */
2460 .set_pud = xen_set_pud_hyper,
2462 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2463 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2465 #if CONFIG_PGTABLE_LEVELS >= 4
2466 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2467 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2468 .set_p4d = xen_set_p4d_hyper,
2470 .alloc_pud = xen_alloc_pmd_init,
2471 .release_pud = xen_release_pmd_init,
2472 #endif /* CONFIG_PGTABLE_LEVELS >= 4 */
2474 .activate_mm = xen_activate_mm,
2475 .dup_mmap = xen_dup_mmap,
2476 .exit_mmap = xen_exit_mmap,
2478 .lazy_mode = {
2479 .enter = paravirt_enter_lazy_mmu,
2480 .leave = xen_leave_lazy_mmu,
2481 .flush = paravirt_flush_lazy_mmu,
2482 },
2484 .set_fixmap = xen_set_fixmap,
2485 };
2487 void __init xen_init_mmu_ops(void)
2489 x86_init.paging.pagetable_init = xen_pagetable_init;
2491 pv_mmu_ops = xen_mmu_ops;
2493 memset(dummy_mapping, 0xff, PAGE_SIZE);
2494 }
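/*
 * Once xen_mmu_ops has been copied into pv_mmu_ops, generic mm code reaches
 * these callbacks through the accessors in <asm/paravirt.h>.  Simplified
 * sketch of the 64-bit set_pte() path (the real one goes through the
 * PVOP_* patching macros):
 *
 *	static inline void set_pte(pte_t *ptep, pte_t pte)
 *	{
 *		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pte.pte);
 *	}
 */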
2496 /* Protected by xen_reservation_lock. */
2497 #define MAX_CONTIG_ORDER 9 /* 2MB */
2498 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2500 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
2501 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2502 unsigned long *in_frames,
2503 unsigned long *out_frames)
2506 struct multicall_space mcs;
2509 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2510 mcs = __xen_mc_entry(0);
2512 if (in_frames)
2513 in_frames[i] = virt_to_mfn(vaddr);
2515 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2516 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2518 if (out_frames)
2519 out_frames[i] = virt_to_pfn(vaddr);
2525 * Update the pfn-to-mfn mappings for a virtual address range, either to
2526 * point to an array of mfns, or contiguously from a single starting
2527 * mfn.
2528 */
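/*
 * The call sites below use exactly these two modes (calls taken from
 * xen_create_contiguous_region() further down):
 *
 *	xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);	contiguous
 *	xen_remap_exchanged_ptes(vstart, order, in_frames, 0);		scattered
 */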
2529 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2530 unsigned long *mfns,
2531 unsigned long first_mfn)
2538 limit = 1u << order;
2539 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2540 struct multicall_space mcs;
2541 unsigned flags;
2543 mcs = __xen_mc_entry(0);
2544 if (mfns)
2545 mfn = mfns[i];
2546 else
2547 mfn = first_mfn + i;
2549 if (i < (limit - 1))
2550 flags = 0;
2551 else {
2552 if (order == 0)
2553 flags = UVMF_INVLPG | UVMF_ALL;
2554 else
2555 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2556 }
2558 MULTI_update_va_mapping(mcs.mc, vaddr,
2559 mfn_pte(mfn, PAGE_KERNEL), flags);
2561 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2568 * Perform the hypercall to exchange a region of our pfns to point to
2569 * memory with the required contiguous alignment. Takes the pfns as
2570 * input, and populates mfns as output.
2572 * Returns a success code indicating whether the hypervisor was able to
2573 * satisfy the request or not.
2575 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2576 unsigned long *pfns_in,
2577 unsigned long extents_out,
2578 unsigned int order_out,
2579 unsigned long *mfns_out,
2580 unsigned int address_bits)
2585 struct xen_memory_exchange exchange = {
2586 .in = {
2587 .nr_extents = extents_in,
2588 .extent_order = order_in,
2589 .extent_start = pfns_in,
2590 .domid = DOMID_SELF
2591 },
2592 .out = {
2593 .nr_extents = extents_out,
2594 .extent_order = order_out,
2595 .extent_start = mfns_out,
2596 .address_bits = address_bits,
2597 .domid = DOMID_SELF
2598 }
2599 };
2601 BUG_ON(extents_in << order_in != extents_out << order_out);
2603 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2604 success = (exchange.nr_exchanged == extents_in);
2606 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2607 BUG_ON(success && (rc != 0));
2609 return success;
2610 }
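/*
 * Worked example of the invariant asserted above: trading 512 order-0 pages
 * for one order-9 extent keeps the page count constant, 512 << 0 == 1 << 9.
 * xen_create_contiguous_region() below calls this with (1UL << order, 0, ...)
 * in and (1, order, ...) out; xen_destroy_contiguous_region() does the
 * reverse exchange.
 */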
2612 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2613 unsigned int address_bits,
2614 dma_addr_t *dma_handle)
2616 unsigned long *in_frames = discontig_frames, out_frame;
2617 unsigned long flags;
2618 int success;
2619 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
2622 * Currently an auto-translated guest will not perform I/O, nor will
2623 * it require PAE page directories below 4GB. Therefore any calls to
2624 * this function are redundant and can be ignored.
2627 if (unlikely(order > MAX_CONTIG_ORDER))
2628 return -ENOMEM;
2630 memset((void *) vstart, 0, PAGE_SIZE << order);
2632 spin_lock_irqsave(&xen_reservation_lock, flags);
2634 /* 1. Zap current PTEs, remembering MFNs. */
2635 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2637 /* 2. Get a new contiguous memory extent. */
2638 out_frame = virt_to_pfn(vstart);
2639 success = xen_exchange_memory(1UL << order, 0, in_frames,
2640 1, order, &out_frame,
2641 address_bits);
2643 /* 3. Map the new extent in place of old pages. */
2644 if (success)
2645 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2646 else
2647 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2649 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2651 *dma_handle = virt_to_machine(vstart).maddr;
2652 return success ? 0 : -ENOMEM;
2654 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
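/*
 * Typical use, loosely modelled on the swiotlb-xen bounce-buffer setup
 * (buffer name, order and address_bits here are illustrative only): take an
 * already page-aligned kernel allocation and make it machine-contiguous so
 * a device limited to address_bits can DMA to it.
 *
 *	dma_addr_t dma_handle;
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, order);
 *
 *	if (xen_create_contiguous_region(virt_to_phys(buf), order,
 *					 address_bits, &dma_handle))
 *		goto err_free;
 */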
2656 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2658 unsigned long *out_frames = discontig_frames, in_frame;
2659 unsigned long flags;
2660 int success;
2661 unsigned long vstart;
2663 if (unlikely(order > MAX_CONTIG_ORDER))
2664 return;
2666 vstart = (unsigned long)phys_to_virt(pstart);
2667 memset((void *) vstart, 0, PAGE_SIZE << order);
2669 spin_lock_irqsave(&xen_reservation_lock, flags);
2671 /* 1. Find start MFN of contiguous extent. */
2672 in_frame = virt_to_mfn(vstart);
2674 /* 2. Zap current PTEs. */
2675 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2677 /* 3. Do the exchange for non-contiguous MFNs. */
2678 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2679 0, out_frames, 0);
2681 /* 4. Map new pages in place of old pages. */
2682 if (success)
2683 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2684 else
2685 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2687 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2689 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
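/*
 * And the inverse of the sketch above, once the buffer is no longer needed
 * for DMA (again illustrative only):
 *
 *	xen_destroy_contiguous_region(virt_to_phys(buf), order);
 *	free_pages((unsigned long)buf, order);
 */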
2691 #ifdef CONFIG_KEXEC_CORE
2692 phys_addr_t paddr_vmcoreinfo_note(void)
2694 if (xen_pv_domain())
2695 return virt_to_machine(vmcoreinfo_note).maddr;
2697 return __pa(vmcoreinfo_note);
2699 #endif /* CONFIG_KEXEC_CORE */