/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/device.h>

#include <asm/extable.h>
#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#ifdef CONFIG_X86_64
#define XEN_PHYSICAL_MASK	__sme_clr((1UL << 52) - 1)
#else
#define XEN_PHYSICAL_MASK	__PHYSICAL_MASK
#endif

#define XEN_PTE_MFN_MASK	((pteval_t)(((signed long)PAGE_MASK) & \
					    XEN_PHYSICAL_MASK))

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)
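
/*
 * Illustrative example (not part of the API): a p2m entry keeps the MFN in
 * the low bits and uses the two top bits of the word as indicators.  On a
 * 64-bit build,
 *
 *	unsigned long e = FOREIGN_FRAME(0x1234UL);
 *
 * yields e == 0x8000000000001234 (bit 63 flags the frame as foreign), and
 * masking with ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT) recovers the
 * plain MFN 0x1234.
 */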

#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))

extern unsigned long *machine_to_phys_mapping;
extern unsigned long  machine_to_phys_nr;
extern unsigned long *xen_p2m_addr;
extern unsigned long  xen_p2m_size;
extern unsigned long  xen_max_p2m_pfn;

extern int xen_alloc_p2m_entry(unsigned long pfn);

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
						    unsigned long pfn_e);

#ifdef CONFIG_XEN_PV
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
				   struct gnttab_map_grant_ref *kmap_ops,
				   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
				     struct gnttab_unmap_grant_ref *kunmap_ops,
				     struct page **pages, unsigned int count);
#else
static inline int
set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			struct gnttab_map_grant_ref *kmap_ops,
			struct page **pages, unsigned int count)
{
	return 0;
}

static inline int
clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			  struct gnttab_unmap_grant_ref *kunmap_ops,
			  struct page **pages, unsigned int count)
{
	return 0;
}
#endif

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define XEN_EXTRA_MEM_RATIO	(10)
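
/*
 * Worked example (illustrative; assumes 4 KiB pages and a 64-byte
 * struct page): a domain booted with 1 GiB of base memory may be given at
 * most 10 GiB of extra memory.  The page structures for those 10 GiB then
 * consume 10 GiB / 4 KiB * 64 bytes = 160 MiB of the 1 GiB base, leaving
 * the system comfortably usable.
 */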

/*
 * Helper functions to write or read unsigned long values to/from
 * memory, when the access may fault.
 */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
	int ret = 0;

	asm volatile("1: mov %[val], %[ptr]\n"
		     "2:\n"
		     ".section .fixup, \"ax\"\n"
		     "3: sub $1, %[ret]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [ret] "+r" (ret), [ptr] "=m" (*addr)
		     : [val] "rm" (val));

	return ret;
}

static inline int xen_safe_read_ulong(const unsigned long *addr,
				      unsigned long *val)
{
	int ret = 0;
	unsigned long rval = ~0ul;

	asm volatile("1: mov %[ptr], %[rval]\n"
		     "2:\n"
		     ".section .fixup, \"ax\"\n"
		     "3: sub $1, %[ret]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [ret] "+r" (ret), [rval] "+r" (rval)
		     : [ptr] "m" (*addr));
	*val = rval;

	return ret;
}
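
/*
 * Usage sketch (illustrative only): reading an m2p slot that may not be
 * backed by a mapped page.  On a fault the extable fixup runs, the return
 * value goes negative and *val is left at ~0ul:
 *
 *	unsigned long pfn;
 *
 *	if (xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn) < 0)
 *		pfn = ~0;
 *
 * This mirrors the call in mfn_to_pfn_no_overrides() below.
 */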

#ifdef CONFIG_XEN_PV
/*
 * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
 * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
 *   bits (identity or foreign) are set.
 * - __pfn_to_mfn() returns the found entry of the p2m table. A possibly set
 *   identity or foreign indicator will still be set. __pfn_to_mfn()
 *   encapsulates get_phys_to_machine(), which is called in special cases only.
 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
 *   cases needing extended handling.
 */
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn < xen_p2m_size)
		mfn = xen_p2m_addr[pfn];
	else if (unlikely(pfn < xen_max_p2m_pfn))
		return get_phys_to_machine(pfn);
	else
		return IDENTITY_FRAME(pfn);

	if (unlikely(mfn == INVALID_P2M_ENTRY))
		return get_phys_to_machine(pfn);

	return mfn;
}
#else
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
	return pfn;
}
#endif

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	/*
	 * Some x86 code is still using pfn_to_mfn instead of
	 * pfn_to_gfn. This will have to be removed when we figure
	 * out all the call sites.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	mfn = __pfn_to_mfn(pfn);

	if (mfn != INVALID_P2M_ENTRY)
		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

	return mfn;
}
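
/*
 * Illustrative contrast of the two lookups: for a pfn whose p2m entry is
 * IDENTITY_FRAME(pfn) (e.g. an I/O region mapped 1:1),
 *
 *	__pfn_to_mfn(pfn) == IDENTITY_FRAME(pfn)	(indicator bit kept)
 *	pfn_to_mfn(pfn)   == pfn			(indicator bit masked)
 *
 * Callers that must distinguish identity or foreign entries use
 * __pfn_to_mfn(); everyone else wants the plain MFN from pfn_to_mfn().
 */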

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
	unsigned long pfn;
	int ret;

	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0;

	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
	if (ret < 0)
		return ~0;

	return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	/*
	 * Some x86 code is still using mfn_to_pfn instead of
	 * gfn_to_pfn. This will have to be removed when we figure
	 * out all the call sites.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		pfn = ~0;

	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or the
	 * entry doesn't map back to the mfn.
	 */
	if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
		pfn = mfn;

	return pfn;
}

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
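
/*
 * Example (illustrative): both conversions translate only the frame number
 * and carry the sub-page offset through unchanged.  Assuming pfn 0x10000
 * maps to mfn 0x20000:
 *
 *	phys_to_machine(XPADDR(0x10000123)).maddr == 0x20000123
 */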

/* Pseudo-physical <-> Guest conversion */
static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;
	else
		return pfn_to_mfn(pfn);
}

static inline unsigned long gfn_to_pfn(unsigned long gfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return gfn;
	else
		return mfn_to_pfn(gfn);
}

/* Pseudo-physical <-> Bus conversion */
#define pfn_to_bfn(pfn)		pfn_to_gfn(pfn)
#define bfn_to_pfn(bfn)		gfn_to_pfn(bfn)

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN)) == MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

	pfn = mfn_to_pfn(mfn);
	if (__pfn_to_mfn(pfn) != mfn)
		return -1; /* force !pfn_valid() */
	return pfn;
}
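
/*
 * Illustrative walk-through of the check above: suppose mfn 0x555 is a
 * foreign page granted to us.  The m2p may name the owner's pfn, but our
 * own p2m for that pfn names a different mfn (or INVALID_P2M_ENTRY), so
 * the p2m(m2p(MFN)) == MFN round trip fails and bfn_to_local_pfn()
 * returns -1, forcing !pfn_valid() as required above.
 */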

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

/* VIRT <-> GUEST conversion */
#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_pfn(v)))
#define gfn_to_virt(g)		(__va(gfn_to_pfn(g) << PAGE_SHIFT))
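
/*
 * Usage sketch (illustrative, error handling omitted): frame numbers handed
 * to the hypervisor or to a backend must be guest frame numbers, so a
 * driver sharing a kernel buffer via the grant table would do e.g.:
 *
 *	void *page = (void *)get_zeroed_page(GFP_KERNEL);
 *	int ref = gnttab_grant_foreign_access(otherend_id,
 *					      virt_to_gfn(page), 0);
 */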

static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			massage_pgprot(pgprot);

	return pte;
}

static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}
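
/*
 * Illustrative use of the pte helpers (a sketch, not a prescribed API):
 * mapping a foreign frame requires a machine-address pte, built from the
 * MFN rather than a PFN:
 *
 *	pte_t pte = mfn_pte(mfn, PAGE_KERNEL);
 *
 * pte_mfn(pte) then recovers the mfn, and pte_val_ma()/__pte_ma() convert
 * between pte_t and the raw machine pte value without any mfn/pfn
 * translation.
 */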

#define pmd_val_ma(v) ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v) ((v).p4d.pgd.pgd)
#else
#define pud_val_ma(v) ((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#ifdef __PAGETABLE_P4D_FOLDED
#define p4d_val_ma(x)	((x).pgd.pgd)
#else
#define p4d_val_ma(x)	((x).p4d)
#endif

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#define xen_remap(cookie, size) ioremap((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

static inline bool xen_arch_need_swiotlb(struct device *dev,
					 phys_addr_t phys,
					 dma_addr_t dev_addr)
{
	return false;
}

static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	return __get_free_pages(__GFP_NOWARN, order);
}

#endif /* _ASM_X86_XEN_PAGE_H */