1 /* SPDX-License-Identifier: GPL-2.0 */
6 #include <linux/kernel.h>
7 #include <linux/static_key.h>
8 #include <linux/types.h>
18 #include <linux/linkage.h>
19 #include <asm/kasan.h>
/* The kunit_kasan_expectation struct is used in KUnit tests for KASAN expected failures */
22 struct kunit_kasan_expectation {
28 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
30 #include <linux/pgtable.h>
32 /* Software KASAN implementations use shadow memory. */
34 #ifdef CONFIG_KASAN_SW_TAGS
35 /* This matches KASAN_TAG_INVALID. */
36 #define KASAN_SHADOW_INIT 0xFE
38 #define KASAN_SHADOW_INIT 0
41 #ifndef PTE_HWTABLE_PTRS
42 #define PTE_HWTABLE_PTRS 0
45 extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
46 extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
47 extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
48 extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
49 extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
51 int kasan_populate_early_shadow(const void *shadow_start,
52 const void *shadow_end);
54 static inline void *kasan_mem_to_shadow(const void *addr)
56 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
57 + KASAN_SHADOW_OFFSET;
60 int kasan_add_zero_shadow(void *start, unsigned long size);
61 void kasan_remove_zero_shadow(void *start, unsigned long size);
63 /* Enable reporting bugs after kasan_disable_current() */
64 extern void kasan_enable_current(void);
66 /* Disable reporting bugs for current task */
67 extern void kasan_disable_current(void);
69 #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
71 static inline int kasan_add_zero_shadow(void *start, unsigned long size)
75 static inline void kasan_remove_zero_shadow(void *start,
/*
 * Software KASAN (generic/SW_TAGS) is disabled: there is no per-task
 * report suppression to toggle, so these are no-ops.
 */
static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
82 #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
84 #ifdef CONFIG_KASAN_HW_TAGS
86 DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
88 static __always_inline bool kasan_enabled(void)
90 return static_branch_likely(&kasan_flag_enabled);
93 static inline bool kasan_hw_tags_enabled(void)
95 return kasan_enabled();
98 void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
99 void kasan_free_pages(struct page *page, unsigned int order);
101 #else /* CONFIG_KASAN_HW_TAGS */
103 static inline bool kasan_enabled(void)
105 return IS_ENABLED(CONFIG_KASAN);
108 static inline bool kasan_hw_tags_enabled(void)
113 static __always_inline void kasan_alloc_pages(struct page *page,
114 unsigned int order, gfp_t flags)
116 /* Only available for integrated init. */
120 static __always_inline void kasan_free_pages(struct page *page,
123 /* Only available for integrated init. */
127 #endif /* CONFIG_KASAN_HW_TAGS */
129 static inline bool kasan_has_integrated_init(void)
131 return kasan_hw_tags_enabled();
137 int alloc_meta_offset;
138 int free_meta_offset;
142 slab_flags_t __kasan_never_merge(void);
143 static __always_inline slab_flags_t kasan_never_merge(void)
146 return __kasan_never_merge();
150 void __kasan_unpoison_range(const void *addr, size_t size);
151 static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
154 __kasan_unpoison_range(addr, size);
157 void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
158 static __always_inline void kasan_poison_pages(struct page *page,
159 unsigned int order, bool init)
162 __kasan_poison_pages(page, order, init);
165 void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
166 static __always_inline void kasan_unpoison_pages(struct page *page,
167 unsigned int order, bool init)
170 __kasan_unpoison_pages(page, order, init);
173 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
174 slab_flags_t *flags);
175 static __always_inline void kasan_cache_create(struct kmem_cache *cache,
176 unsigned int *size, slab_flags_t *flags)
179 __kasan_cache_create(cache, size, flags);
182 void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
183 static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
186 __kasan_cache_create_kmalloc(cache);
189 size_t __kasan_metadata_size(struct kmem_cache *cache);
190 static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
193 return __kasan_metadata_size(cache);
197 void __kasan_poison_slab(struct slab *slab);
198 static __always_inline void kasan_poison_slab(struct slab *slab)
201 __kasan_poison_slab(slab);
204 void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
205 static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
209 __kasan_unpoison_object_data(cache, object);
212 void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
213 static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
217 __kasan_poison_object_data(cache, object);
220 void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
222 static __always_inline void * __must_check kasan_init_slab_obj(
223 struct kmem_cache *cache, const void *object)
226 return __kasan_init_slab_obj(cache, object);
227 return (void *)object;
230 bool __kasan_slab_free(struct kmem_cache *s, void *object,
231 unsigned long ip, bool init);
232 static __always_inline bool kasan_slab_free(struct kmem_cache *s,
233 void *object, bool init)
236 return __kasan_slab_free(s, object, _RET_IP_, init);
240 void __kasan_kfree_large(void *ptr, unsigned long ip);
241 static __always_inline void kasan_kfree_large(void *ptr)
244 __kasan_kfree_large(ptr, _RET_IP_);
247 void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
248 static __always_inline void kasan_slab_free_mempool(void *ptr)
251 __kasan_slab_free_mempool(ptr, _RET_IP_);
254 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
255 void *object, gfp_t flags, bool init);
256 static __always_inline void * __must_check kasan_slab_alloc(
257 struct kmem_cache *s, void *object, gfp_t flags, bool init)
260 return __kasan_slab_alloc(s, object, flags, init);
264 void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
265 size_t size, gfp_t flags);
266 static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
267 const void *object, size_t size, gfp_t flags)
270 return __kasan_kmalloc(s, object, size, flags);
271 return (void *)object;
274 void * __must_check __kasan_kmalloc_large(const void *ptr,
275 size_t size, gfp_t flags);
276 static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
277 size_t size, gfp_t flags)
280 return __kasan_kmalloc_large(ptr, size, flags);
284 void * __must_check __kasan_krealloc(const void *object,
285 size_t new_size, gfp_t flags);
286 static __always_inline void * __must_check kasan_krealloc(const void *object,
287 size_t new_size, gfp_t flags)
290 return __kasan_krealloc(object, new_size, flags);
291 return (void *)object;
295 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
296 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
298 bool __kasan_check_byte(const void *addr, unsigned long ip);
299 static __always_inline bool kasan_check_byte(const void *addr)
302 return __kasan_check_byte(addr, _RET_IP_);
307 bool kasan_save_enable_multi_shot(void);
308 void kasan_restore_multi_shot(bool enabled);
310 #else /* CONFIG_KASAN */
312 static inline slab_flags_t kasan_never_merge(void)
316 static inline void kasan_unpoison_range(const void *address, size_t size) {}
317 static inline void kasan_poison_pages(struct page *page, unsigned int order,
319 static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
321 static inline void kasan_cache_create(struct kmem_cache *cache,
323 slab_flags_t *flags) {}
/*
 * CONFIG_KASAN disabled: slab-cache hooks do nothing and no per-object
 * KASAN metadata is reserved (size 0).
 */
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct slab *slab) {}
327 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
329 static inline void kasan_poison_object_data(struct kmem_cache *cache,
331 static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
334 return (void *)object;
336 static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
/* CONFIG_KASAN disabled: free-side hooks are no-ops. */
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
342 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
343 gfp_t flags, bool init)
347 static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
348 size_t size, gfp_t flags)
350 return (void *)object;
352 static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
356 static inline void *kasan_krealloc(const void *object, size_t new_size,
359 return (void *)object;
361 static inline bool kasan_check_byte(const void *address)
366 #endif /* CONFIG_KASAN */
368 #if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
369 void kasan_unpoison_task_stack(struct task_struct *task);
371 static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
374 #ifdef CONFIG_KASAN_GENERIC
376 void kasan_cache_shrink(struct kmem_cache *cache);
377 void kasan_cache_shutdown(struct kmem_cache *cache);
378 void kasan_record_aux_stack(void *ptr);
379 void kasan_record_aux_stack_noalloc(void *ptr);
381 #else /* CONFIG_KASAN_GENERIC */
/*
 * Generic KASAN disabled: cache shrink/shutdown notifications and
 * auxiliary stack-trace recording are no-ops.
 */
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
388 #endif /* CONFIG_KASAN_GENERIC */
390 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
392 static inline void *kasan_reset_tag(const void *addr)
394 return (void *)arch_kasan_reset_tag(addr);
398 * kasan_report - print a report about a bad memory access detected by KASAN
399 * @addr: address of the bad access
400 * @size: size of the bad access
401 * @is_write: whether the bad access is a write or a read
402 * @ip: instruction pointer for the accessibility check or the bad access itself
404 bool kasan_report(unsigned long addr, size_t size,
405 bool is_write, unsigned long ip);
407 #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
409 static inline void *kasan_reset_tag(const void *addr)
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
416 #ifdef CONFIG_KASAN_HW_TAGS
418 void kasan_report_async(void);
420 #endif /* CONFIG_KASAN_HW_TAGS */
422 #ifdef CONFIG_KASAN_SW_TAGS
423 void __init kasan_init_sw_tags(void);
425 static inline void kasan_init_sw_tags(void) { }
428 #ifdef CONFIG_KASAN_HW_TAGS
429 void kasan_init_hw_tags_cpu(void);
430 void __init kasan_init_hw_tags(void);
/* Stubs for !CONFIG_KASAN_HW_TAGS builds: no per-CPU or global HW-tags init. */
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
436 #ifdef CONFIG_KASAN_VMALLOC
438 int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
439 void kasan_poison_vmalloc(const void *start, unsigned long size);
440 void kasan_unpoison_vmalloc(const void *start, unsigned long size);
441 void kasan_release_vmalloc(unsigned long start, unsigned long end,
442 unsigned long free_region_start,
443 unsigned long free_region_end);
445 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
447 #else /* CONFIG_KASAN_VMALLOC */
449 static inline int kasan_populate_vmalloc(unsigned long start,
455 static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
457 static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
459 static inline void kasan_release_vmalloc(unsigned long start,
461 unsigned long free_region_start,
462 unsigned long free_region_end) {}
464 static inline void kasan_populate_early_vm_area_shadow(void *start,
468 #endif /* CONFIG_KASAN_VMALLOC */
470 #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
471 !defined(CONFIG_KASAN_VMALLOC)
474 * These functions provide a special case to support backing module
475 * allocations with real shadow memory. With KASAN vmalloc, the special
476 * case is unnecessary, as the work is handled in the generic case.
478 int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask);
479 void kasan_free_shadow(const struct vm_struct *vm);
481 #else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
/*
 * Module shadow special-casing is unnecessary here (either KASAN is off or
 * KASAN_VMALLOC handles module allocations generically), so allocation
 * trivially succeeds (0) and freeing is a no-op.
 */
static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
486 #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
488 #ifdef CONFIG_KASAN_INLINE
489 void kasan_non_canonical_hook(unsigned long addr);
490 #else /* CONFIG_KASAN_INLINE */
491 static inline void kasan_non_canonical_hook(unsigned long addr) { }
492 #endif /* CONFIG_KASAN_INLINE */
494 #endif /* LINUX_KASAN_H */