 	unsigned long i = 0;
 	unsigned long n = end_pfn - start_pfn;
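+	/*
+	 * A remap_pfn of zero means this is the first area handed to us by
+	 * the xen_foreach_remap_area() iteration (its running value starts
+	 * at 0), so start remapping at the end of the initial allocation.
+	 */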
+	if (remap_pfn == 0)
+		remap_pfn = nr_pages;
+
 	while (i < n) {
 		unsigned long cur_pfn = start_pfn + i;
 		unsigned long left = n - i;
 	return remap_pfn;
 }
-static void __init xen_set_identity_and_remap(unsigned long nr_pages)
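+/*
+ * Callback for xen_foreach_remap_area(): count the pages of the non-RAM
+ * area [start_pfn, end_pfn) lying below nr_pages, i.e. the pages whose
+ * underlying RAM will have to be remapped, and add them to the running
+ * total remap_pages. E.g. start_pfn 0xa0, end_pfn 0x100 and nr_pages 0xe0
+ * add min(0x100, 0xe0) - 0xa0 = 0x40 pages.
+ */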
+static unsigned long __init xen_count_remap_pages(unsigned long start_pfn,
+	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pages)
+{
+	if (start_pfn >= nr_pages)
+		return remap_pages;
+
+	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+}
+
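+/*
+ * Iterate over the non-RAM areas of the E820 map below nr_pages and call
+ * func() for each of them. The value returned by one invocation is passed
+ * to the next one as last_val; the result of the final invocation is
+ * returned to the caller.
+ */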
+static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
+			      unsigned long nr_pages, unsigned long last_val))
 {
 	phys_addr_t start = 0;
-	unsigned long last_pfn = nr_pages;
+	unsigned long ret_val = 0;
 	const struct e820entry *entry = xen_e820_map;
 	int i;
 	/*
 	 * Combine non-RAM regions and gaps until a RAM region (or the
-	 * end of the map) is reached, then set the 1:1 map and
-	 * remap the memory in those non-RAM regions.
+	 * end of the map) is reached, then call the provided function
+	 * to process the non-RAM region.
 	 *
 	 * The combined non-RAM regions are rounded to a whole number
 	 * of pages so any partial pages are accessible via the 1:1
 			end_pfn = PFN_UP(entry->addr);
 			if (start_pfn < end_pfn)
-				last_pfn = xen_set_identity_and_remap_chunk(
-						start_pfn, end_pfn, nr_pages,
-						last_pfn);
+				ret_val = func(start_pfn, end_pfn, nr_pages,
+					       ret_val);
 			start = end;
 		}
 	}
-	pr_info("Released %ld page(s)\n", xen_released_pages);
+	return ret_val;
 }
 /*
 	}
 }
-static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
-{
-	unsigned long extra = 0;
-	unsigned long start_pfn, end_pfn;
-	const struct e820entry *entry = xen_e820_map;
-	int i;
-
-	end_pfn = 0;
-	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
-		start_pfn = PFN_DOWN(entry->addr);
-		/* Adjacent regions on non-page boundaries handling! */
-		end_pfn = min(end_pfn, start_pfn);
-
-		if (start_pfn >= max_pfn)
-			return extra + max_pfn - end_pfn;
-
-		/* Add any holes in map to result. */
-		extra += start_pfn - end_pfn;
-
-		end_pfn = PFN_UP(entry->addr + entry->size);
-		end_pfn = min(end_pfn, max_pfn);
-
-		if (entry->type != E820_RAM)
-			extra += end_pfn - start_pfn;
-	}
-
-	return extra;
-}
-
 bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
 {
 	struct e820entry *entry;
 	max_pages = xen_get_max_pages();
 	/* How many extra pages do we need due to remapping? */
-	max_pages += xen_count_remap_pages(max_pfn);
+	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
 	if (max_pages > max_pfn)
 		extra_pages += max_pages - max_pfn;
 	 * Set identity map on non-RAM pages and prepare remapping the
 	 * underlying RAM.
 	 */
-	xen_set_identity_and_remap(max_pfn);
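+	/* The final remap_pfn returned by the iteration is not needed here. */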
+	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
+
+	pr_info("Released %ld page(s)\n", xen_released_pages);
 	return "Xen";
 }