/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/*
 * The kunit_kasan_expectation struct is used by KUnit tests for KASAN
 * expected failures.
 */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};

#endif /* CONFIG_KASAN */
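
/*
 * Illustrative use (a sketch based on the KASAN KUnit tests in
 * lib/test_kasan.c): a test sets ->report_expected, performs a
 * deliberately bad access, then checks that the reporting path set
 * ->report_found; see the KUNIT_EXPECT_KASAN_FAIL() macro there.
 */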

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
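
/*
 * Example of the mapping (illustrative): with generic KASAN,
 * KASAN_SHADOW_SCALE_SHIFT is 3, so each shadow byte tracks an 8-byte
 * granule of real memory:
 *
 *	shadow = *(u8 *)kasan_mem_to_shadow(addr);
 *
 * A shadow value of 0 means the whole granule is accessible, 1..7 means
 * only that many leading bytes are, and negative values mark the granule
 * as poisoned (redzone, freed object, etc.).
 */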

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs again after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for the current task */
extern void kasan_disable_current(void);
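
/*
 * Sketch of intended use (illustrative only): suppress reports around an
 * access that is known to touch poisoned memory:
 *
 *	kasan_disable_current();
 *	val = *(volatile char *)poisoned_ptr;	// deliberately unchecked
 *	kasan_enable_current();
 */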

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */
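
/*
 * The wrappers below all follow one pattern, sketched here with a
 * hypothetical kasan_foo() (illustrative only): an __always_inline check
 * of kasan_enabled() guarding the out-of-line __kasan_*() call:
 *
 *	static __always_inline void kasan_foo(void)
 *	{
 *		if (kasan_enabled())
 *			__kasan_foo();
 *	}
 *
 * With HW_TAGS this is a runtime-patched static branch, so the call is
 * skipped at negligible cost when KASAN was not enabled at boot; in the
 * other modes kasan_enabled() is constant true.
 */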

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
				slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
						unsigned long ip)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, ip);
	return false;
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, ip);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
				struct kmem_cache *s, void *object, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, ip);
}

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return false;
}
static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);
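
/*
 * Illustrative call from an instrumentation hook (a sketch, assuming the
 * access has already been found to be bad): report an 8-byte write at
 * addr, attributed to the caller:
 *
 *	kasan_report((unsigned long)addr, 8, true, _RET_IP_);
 *
 * The return value indicates whether a report was actually printed;
 * reporting may be suppressed, e.g. by kasan_disable_current().
 */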

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
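
/*
 * Sketch of the expected call site (illustrative, modeled on the x86
 * module_alloc() implementation): the arch allocates the mapping, then
 * asks KASAN to back it with real shadow, unwinding on failure:
 *
 *	p = __vmalloc_node_range(...);
 *	if (p && kasan_module_alloc(p, size)) {
 *		vfree(p);
 *		return NULL;
 *	}
 */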

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */