// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>
/*
 * A cache of unpopulated, device-backed pages: pages are handed out from
 * page_list and returned to it, with list_count tracking its length.
 * Both are protected by list_lock.
 */
static DEFINE_MUTEX(list_lock);
static LIST_HEAD(page_list);
static unsigned int list_count;

static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		goto err_pgmap;

	pgmap->type = MEMORY_DEVICE_GENERIC;
	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory, so the p2m
	 * must contain invalid entries to ensure the correct non-present
	 * PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries are
	 * not restored, since this region is now known not to conflict
	 * with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		list_add(&pg->lru, &page_list);
		list_count++;
	}

	return 0;

err_memremap:
	release_resource(res);
err_resource:
	kfree(pgmap);
err_pgmap:
	kfree(res);
	return ret;
}
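
/*
 * Sizing note (an illustrative calculation, assuming x86-64 defaults where
 * a memory section is 128 MiB, i.e. PAGES_PER_SECTION == 32768 with 4 KiB
 * pages): fill_list() rounds every request up to whole sections, so asking
 * for a single page still reserves and maps round_up(1, 32768) == 32768
 * pages. The surplus is not wasted; it stays cached on page_list for
 * later callers.
 */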

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = list_first_entry_or_null(&page_list,
							   struct page,
							   lru);

		BUG_ON(!pg);
		list_del(&pg->lru);
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				/* Undo: put the pages taken so far back on the list. */
				for (j = 0; j <= i; j++) {
					list_add(&pages[j]->lru, &page_list);
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
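
/*
 * Example usage (an illustrative sketch only; the surrounding driver code
 * and the choice of gnttab_map_refs() as the consumer are assumptions, not
 * something this file mandates):
 *
 *	struct page *pages[16];
 *	int ret;
 *
 *	ret = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *	if (ret)
 *		return ret;
 *
 *	// Use the pages as backing for foreign mappings, e.g. with
 *	// gnttab_map_refs(). Once the mappings are torn down, give the
 *	// pages back to the cache:
 *
 *	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
 */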

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		list_add(&pages[i]->lru, &page_list);
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
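
/*
 * Note (a usage caution, not stated elsewhere in this file): pages handed
 * out by xen_alloc_unpopulated_pages() come from memremap_pages(), not from
 * the buddy allocator, so the only correct way to release them is
 * xen_free_unpopulated_pages(), which returns them to this cache.
 */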

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			list_add(&pg->lru, &page_list);
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif
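
/*
 * For reference (a sketch of an external definition, reproduced here only
 * as an assumption; see the x86 Xen headers for the authoritative version):
 * each xen_extra_mem[] entry describes one range of page frames that the
 * PV setup code set aside, roughly
 *
 *	struct xen_memory_region {
 *		unsigned long start_pfn;
 *		unsigned long n_pfns;
 *	};
 *
 * so init() simply walks every frame of every region and seeds the
 * free-page cache with the corresponding struct page.
 */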