// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>

/*
 * Pool of unpopulated memory pages: fill_list() grows it in
 * section-sized chunks, the helpers below hand pages out and take
 * them back. page_list and list_count are protected by list_lock.
 */
static DEFINE_MUTEX(list_lock);
static LIST_HEAD(page_list);
static unsigned int list_count;
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret;

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->res.name = "Xen scratch";
	pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	/* Request a free, section-aligned IOMEM range to back the pages. */
	ret = allocate_resource(&iomem_resource, &pgmap->res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		kfree(pgmap);
		return ret;
	}

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory, so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				release_resource(&pgmap->res);
				kfree(pgmap);
				return -ENOMEM;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		release_resource(&pgmap->res);
		kfree(pgmap);
		return PTR_ERR(vaddr);
	}

	/* Add every newly backed page to the free list. */
	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		list_add(&pg->lru, &page_list);
		list_count++;
	}

	return 0;
}
/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = list_first_entry_or_null(&page_list,
							   struct page,
							   lru);

		BUG_ON(!pg);
		list_del(&pg->lru);
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		/*
		 * PV guests need a valid p2m entry for every page handed
		 * out; on failure, put everything taken so far back on
		 * the free list.
		 */
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				for (j = 0; j <= i; j++) {
					list_add(&pages[j]->lru, &page_list);
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		list_add(&pages[i]->lru, &page_list);
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
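/*
 * Illustrative sketch only, not part of the driver: how a caller is
 * expected to pair the two helpers above. The function name and the
 * foreign-mapping step are hypothetical placeholders; a real user such
 * as the grant-table code would map grants into the returned pages
 * before handing them back.
 */
static int __maybe_unused example_use_unpopulated_pages(unsigned int nr_pages)
{
	struct page **pages;
	int ret;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret) {
		kfree(pages);
		return ret;
	}

	/* ... use page_to_pfn(pages[i]) as targets for foreign mappings ... */

	xen_free_unpopulated_pages(nr_pages, pages);
	kfree(pages);

	return 0;
}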
#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			list_add(&pg->lru, &page_list);
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif