/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>
struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */
/*
 * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
 * vfree_atomic().
 */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* Reset direct map and flush TLB on unmap */
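/*
 * Illustrative sketch, not part of this header: a caller that changes the
 * permissions of a vmalloc'ed region is expected to set
 * VM_FLUSH_RESET_PERMS (via set_vm_flush_reset_perms(), declared below)
 * before changing permissions, and must free from process context with
 * vfree(), never vfree_atomic(). The set_memory_*() helpers come from
 * <asm/set_memory.h>; @size is assumed page-aligned here.
 *
 *	void *p = __vmalloc(size, GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	set_vm_flush_reset_perms(p);
 *	set_memory_ro((unsigned long)p, size >> PAGE_SHIFT);
 *	set_memory_x((unsigned long)p, size >> PAGE_SHIFT);
 *	...
 *	vfree(p);	(direct map reset and TLB flush happen here)
 */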
/* bits [20..32] reserved for arch specific ioremap internals */
/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	/*
	 * The following three variables can be packed, because
	 * a vmap_area object is always in one of three states:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) in "busy" tree (root is vmap_area_root)
	 *    3) in purge list  (head is vmap_purge_list)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
		struct llist_node purge_list;	/* in purge list */
	};
};
/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
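/*
 * Illustrative sketch, not part of this header: vm_map_ram() creates a
 * transient kernel mapping over a caller-owned page array; @pages and
 * @nr_pages are assumed to have been set up by the caller.
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *	if (!va)
 *		return -ENOMEM;
 *	memset(va, 0, nr_pages * PAGE_SIZE);
 *	vm_unmap_ram(va, nr_pages);
 */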
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif
extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
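/*
 * Illustrative sketch, not part of this header: the usual
 * allocate/check/free pattern for the vmalloc family; struct foo and
 * @nents are hypothetical. array_size() (from <linux/overflow.h>,
 * included above) guards the multiplication against overflow.
 *
 *	struct foo *tbl = vzalloc(array_size(nents, sizeof(*tbl)));
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */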
extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
				unsigned long pgoff);
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif
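/*
 * Illustrative sketch, not part of this header: an architecture that
 * needs to synchronize top-level kernel page-table changes would define,
 * in its own headers, something like
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PGD_MODIFIED
 *
 * and provide a matching arch_sync_kernel_mappings() implementation.
 */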
/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize the calls out if
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
/*
 *	Low-level APIs (not for driver use!)
 */
static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	return area->size;
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
extern void free_vm_area(struct vm_struct *area);
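/*
 * Illustrative sketch, not part of this header: alloc_vm_area() reserves
 * kernel VA space and hands back pointers to the backing PTEs so callers
 * (e.g. paravirtualized guests) can install entries themselves; N_PAGES
 * is hypothetical.
 *
 *	pte_t *ptes[N_PAGES];
 *	struct vm_struct *area = alloc_vm_area(N_PAGES * PAGE_SIZE, ptes);
 *	if (!area)
 *		return -ENOMEM;
 *	...	(populate the PTEs through ptes[i])
 *	free_vm_area(area);
 */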
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
/*
 *	Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif
int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
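/*
 * Illustrative sketch, not part of this header: a hypothetical client
 * that wants to be notified when vmap areas are purged, e.g. to drop
 * cached translations.
 *
 *	static int foo_purge(struct notifier_block *nb, unsigned long event,
 *			     void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_nb = { .notifier_call = foo_purge };
 *	...
 *	register_vmap_purge_notifier(&foo_nb);
 */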
#endif /* _LINUX_VMALLOC_H */