/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>
#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif
void *kmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}
void kunmap_high(struct page *page);

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
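/*
 * Example (an illustrative sketch, not part of this header): a typical
 * kmap()/kunmap() pair around a copy that may sleep.  The helper name and
 * its arguments are hypothetical.  kmap() may sleep, so this must only be
 * called from process context, and kunmap() takes the page, not the
 * mapped address.
 *
 *	static void example_copy_to_page(struct page *page,
 *					 const void *src, size_t len)
 *	{
 *		void *vaddr = kmap(page);
 *
 *		memcpy(vaddr, src, len);
 *		kunmap(page);
 *	}
 */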
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 *
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);
}
#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)
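/*
 * Example (an illustrative sketch, not part of this header): a short,
 * non-sleeping access under an atomic kmap.  The helper name is
 * hypothetical.  Nothing between kmap_atomic() and kunmap_atomic() may
 * sleep, and kunmap_atomic() takes the returned address, not the page.
 *
 *	static void example_clear_range(struct page *page,
 *					unsigned int offset, unsigned int len)
 *	{
 *		void *vaddr = kmap_atomic(page);
 *
 *		memset(vaddr + offset, 0, len);
 *		kunmap_atomic(vaddr);
 *	}
 */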
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_dec(void)
{
	atomic_long_dec(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline void totalhigh_pages_set(long val)
{
	atomic_long_set(&_totalhigh_pages, val);
}

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);
#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page)
{
}

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}
static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
static inline void kunmap_atomic_high(void *addr)
{
	/*
	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
	 * handles re-enabling faults and preemption.
	 */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while(0)

#endif /* CONFIG_HIGHMEM */
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);
static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}
static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}
static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
/*
 * Prevent people from trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)					\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	kunmap_atomic_high(addr);				\
	pagefault_enable();					\
	preempt_enable();					\
} while (0)
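/*
 * For illustration, the mistake the BUILD_BUG_ON() above catches at
 * compile time:
 *
 *	addr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(page);	(build error: pass addr, not the page)
 *	kunmap_atomic(addr);	(correct)
 */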
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
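/*
 * Example (an illustrative sketch, not part of this header): allocating a
 * zeroed, movable page in an anonymous-fault-style path.  The vma/vmf
 * variables and the oom label are hypothetical.
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	if (!page)
 *		goto oom;
 */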
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}
static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}
static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}
static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
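/*
 * Example (an illustrative sketch, not part of this header): zeroing the
 * tail of a page after a partial write, as filesystems commonly do.  The
 * offset/len variables are hypothetical.
 *
 *	if (offset + len < PAGE_SIZE)
 *		zero_user(page, offset + len, PAGE_SIZE - (offset + len));
 */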
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif
#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#endif /* _LINUX_HIGHMEM_H */