/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

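/*
 * A hedged usage sketch (not part of this header): a vmalloc-path caller
 * would typically OR these flags together before handing them to
 * kasan_unpoison_vmalloc(), e.g.
 *
 *	flags = KASAN_VMALLOC_VM_ALLOC | KASAN_VMALLOC_PROT_NORMAL;
 *	if (gfp_mask & __GFP_ZERO)
 *		flags |= KASAN_VMALLOC_INIT;
 *
 * for a normal (shadow-backed, non-executable) vm_area allocation.
 */
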
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif

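/*
 * A worked example of the translation above, assuming the generic-mode
 * value KASAN_SHADOW_SCALE_SHIFT == 3 (one shadow byte tracks an 8-byte
 * granule); the offset itself is architecture-specific:
 *
 *	shadow = (0xffff888012345678UL >> 3) + KASAN_SHADOW_OFFSET;
 *
 * so the eight addresses 0xffff888012345678..0xffff88801234567f all map
 * to the same shadow byte.
 */
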
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

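/*
 * A hedged sketch of how an allocator free path is expected to use the
 * hook above (modeled on, but not copied from, the slab free hook): a
 * true return value means KASAN detected a double/invalid free or put
 * the object into quarantine, so the caller must not actually free it.
 *
 *	static void my_free_hook(struct kmem_cache *s, void *object)
 *	{
 *		if (kasan_slab_free(s, object, false))
 *			return;	// quarantined or the free was buggy
 *		// ... proceed with releasing the object ...
 *	}
 */
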
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
		const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						       unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							 unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}

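/*
 * A hedged usage sketch (hypothetical helpers, not from this file): a
 * subsystem keeping a reserve of order-0 pages would poison on put and
 * unpoison on get:
 *
 *	// putting a page into the reserve
 *	if (kasan_mempool_poison_pages(page, 0))
 *		add_to_reserve(page);		// hypothetical helper
 *
 *	// taking it back out
 *	page = take_from_reserve();		// hypothetical helper
 *	kasan_mempool_unpoison_pages(page, 0);
 */
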
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation without initializing its memory and
 * without putting it into the quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() without initializing its memory. For the
 * tag-based modes, this function does not assign a new tag to the allocation
 * and instead restores the original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							  size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}

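/*
 * A hedged sketch of the slab-object pair above, modeled on how mempool
 * caches elements (simplified; elem_size is a hypothetical field, not
 * the actual mempool code):
 *
 *	// element returned to the pool
 *	if (kasan_mempool_poison_object(element))
 *		pool->elements[pool->curr_nr++] = element;
 *
 *	// element handed back out; size tells KASAN how much to unpoison
 *	element = pool->elements[--pool->curr_nr];
 *	kasan_mempool_unpoison_object(element, pool->elem_size);
 */
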
/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

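/*
 * A hedged example of the intended use (similar in spirit to how ksize()
 * validates its argument): probe one byte to report and reject a bad
 * pointer before operating on the whole object.
 *
 *	if (!kasan_check_byte(object))
 *		return 0;	// access was bad and has been reported
 */
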
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

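/*
 * A hedged note on the aux-stack helpers above: subsystems that defer
 * work call these to attach the current stack trace to an object so a
 * later KASAN report can show where the deferral happened, e.g. (sketch):
 *
 *	kasan_record_aux_stack_noalloc(work);	// before queueing a work item
 */
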
#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

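/*
 * A hedged example: the tag-based modes store a tag in the top byte of
 * the pointer, so code that needs the untagged (canonical) address, e.g.
 * for pointer arithmetic against an untagged base, strips it first:
 *
 *	void *untagged = kasan_reset_tag(ptr);
 */
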
/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

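/*
 * A hedged sketch of the call pattern (modeled on the vmalloc path, not
 * copied from it): callers must use the returned pointer, since the
 * tag-based modes may return a differently tagged address:
 *
 *	addr = kasan_unpoison_vmalloc(addr, size,
 *				      KASAN_VMALLOC_VM_ALLOC |
 *				      KASAN_VMALLOC_PROT_NORMAL);
 */
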
void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

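/*
 * A hedged note (based on how arch fault handlers typically use this):
 * kasan_non_canonical_hook() is meant to be called from a fault handler
 * when a non-canonical address was dereferenced, so the report can say
 * whether the address looks like a wild shadow-memory access, e.g.:
 *
 *	kasan_non_canonical_hook(fault_address);	// in the arch fault path
 */
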
#endif /* LINUX_KASAN_H */