* @page_shift: page_shift argument passed to vmap_range_noflush().
*
* KMSAN creates new metadata pages for the physical pages mapped into the
- * virtual memory.
+ * virtual memory. Returns 0 on success; callers must check for a non-zero
+ * return value.
*/
-void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int page_shift);
+int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift);
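+/*
+ * Sketch of the expected caller pattern (illustrative only; "err" and the
+ * shift argument follow the caller's own naming):
+ *
+ *	err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+ *				       page_shift);
+ *	if (err)
+ *		return err;
+ */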
/**
 * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
{
}
-static inline void kmsan_ioremap_page_range(unsigned long start,
- unsigned long end,
- phys_addr_t phys_addr,
- pgprot_t prot,
- unsigned int page_shift)
+static inline int kmsan_ioremap_page_range(unsigned long start,
+ unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift)
{
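+	/* KMSAN is compiled out: report success so callers can proceed. */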
+ return 0;
}
static inline void kmsan_iounmap_page_range(unsigned long start,
* into the virtual memory. If those physical pages already had shadow/origin,
* those are ignored.
*/
-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
- phys_addr_t phys_addr, pgprot_t prot,
- unsigned int page_shift)
+int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift)
{
gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
struct page *shadow, *origin;
unsigned long off = 0;
- int nr;
+ int nr, err = 0, clean = 0, mapped;
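+	/*
+	 * If KMSAN is disabled, or if this is a recursive call from within the
+	 * runtime, skip metadata creation and report success.
+	 */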
if (!kmsan_enabled || kmsan_in_runtime())
- return;
+ return 0;
nr = (end - start) / PAGE_SIZE;
kmsan_enter_runtime();
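+	/*
+	 * For each page in [start, end), allocate and map one shadow and one
+	 * origin page.
+	 */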
- for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
+ for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
shadow = alloc_pages(gfp_mask, 1);
origin = alloc_pages(gfp_mask, 1);
- __vmap_pages_range_noflush(
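+		/* Metadata page allocation failed: unwind and return -ENOMEM. */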
+ if (!shadow || !origin) {
+ err = -ENOMEM;
+ goto ret;
+ }
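+		/*
+		 * Map the freshly allocated shadow page at the KMSAN shadow
+		 * address corresponding to this ioremap page.
+		 */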
+ mapped = __vmap_pages_range_noflush(
vmalloc_shadow(start + off),
vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
PAGE_SHIFT);
- __vmap_pages_range_noflush(
+ if (mapped) {
+ err = mapped;
+ goto ret;
+ }
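+		/* The mapping now owns the shadow page; don't free it at ret. */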
+ shadow = NULL;
+ mapped = __vmap_pages_range_noflush(
vmalloc_origin(start + off),
vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
PAGE_SHIFT);
+ if (mapped) {
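+			/*
+			 * Undo this iteration's shadow mapping before
+			 * unwinding: its origin counterpart was never mapped.
+			 */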
+ __vunmap_range_noflush(
+ vmalloc_shadow(start + off),
+ vmalloc_shadow(start + off + PAGE_SIZE));
+ err = mapped;
+ goto ret;
+ }
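+		/* Likewise, the mapping now owns the origin page. */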
+ origin = NULL;
+ }
+	/* Page mapping loop finished normally; nothing to clean up. */
+ clean = 0;
+
+ret:
+	/*
+	 * Free the shadow/origin pages allocated on the failing loop
+	 * iteration, if any. On the success path both pointers are already
+	 * NULL; checking them here also covers a failure on the very first
+	 * iteration, where clean is still 0.
+	 */
+	if (shadow)
+		__free_pages(shadow, 1);
+	if (origin)
+		__free_pages(origin, 1);
+	if (clean > 0) {
+		/*
+		 * Something went wrong. Delete the shadow/origin mappings
+		 * created during the previous iterations.
+		 */
+ __vunmap_range_noflush(
+ vmalloc_shadow(start),
+ vmalloc_shadow(start + clean * PAGE_SIZE));
+ __vunmap_range_noflush(
+ vmalloc_origin(start),
+ vmalloc_origin(start + clean * PAGE_SIZE));
}
flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
kmsan_leave_runtime();
+ return err;
}
void kmsan_iounmap_page_range(unsigned long start, unsigned long end)