/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

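/*
 * Illustrative sketch, not part of this header's API: in generic mode
 * KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte covers an 8-byte
 * granule and addresses within the same granule share a shadow byte:
 *
 *	u8 *shadow = kasan_mem_to_shadow(ptr);
 *	u8 *shadow2 = kasan_mem_to_shadow(ptr + 7);
 *	// shadow == shadow2 whenever ptr is 8-byte aligned
 */
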
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

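/*
 * Illustrative usage sketch: code that must touch possibly poisoned memory
 * (e.g. while dumping a report) brackets the access so the current task
 * does not recursively report. The calls nest via a per-task depth count:
 *
 *	kasan_disable_current();
 *	val = *(volatile char *)maybe_poisoned;
 *	kasan_enable_current();
 */
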
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

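/*
 * Pattern note: each kasan_*() hook below is a static-inline wrapper that
 * checks kasan_enabled() before calling the out-of-line __kasan_*()
 * implementation. With CONFIG_KASAN_HW_TAGS the check is a static branch
 * patched at boot, so a kernel booted with KASAN off pays roughly one
 * patched jump per hook; in the other modes kasan_enabled() is constant
 * true and the wrapper reduces to a direct call.
 */
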
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
				slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_);
	return false;
}

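/*
 * Sketch of how a slab free path might consume the return value (the
 * function name here is hypothetical, not the slab internals): a true
 * return means KASAN took ownership of the object, e.g. put it in the
 * generic mode's quarantine, so it must not go back on the freelist yet.
 *
 *	static bool my_free_hook(struct kmem_cache *s, void *object)
 *	{
 *		return !kasan_slab_free(s, object);	// true: free now
 *	}
 */
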
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
				struct kmem_cache *s, void *object, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

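/*
 * Illustrative example: helpers that may be handed an invalid or freed
 * pointer can probe one byte first instead of faulting on the access;
 * ksize()-style code returns 0 for such objects:
 *
 *	if (!kasan_check_byte(objp))
 *		return 0;
 */
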
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

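/*
 * Descriptive note (the callers named here are typical users, an
 * assumption rather than a contract): kasan_record_aux_stack() snapshots
 * the current stack into the object's KASAN metadata as an auxiliary
 * stack, so that deferred-processing sites, e.g. call_rcu() or workqueue
 * submission, show up in later reports about that object.
 */
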
#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

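/*
 * Illustrative use: the tag-based modes keep a tag in the top byte of a
 * pointer, so two pointers to the same object may compare unequal.
 * Comparisons and address arithmetic should go through kasan_reset_tag():
 *
 *	if (kasan_reset_tag(p) == kasan_reset_tag(q))
 *		;	// same underlying address, tags ignored
 */
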
/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

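/*
 * Caller sketch (hypothetical names; the real call sites live in the KASAN
 * runtimes): a check routine reports a bad access and can use the return
 * value to learn whether a report was actually printed, since reports may
 * be suppressed:
 *
 *	if (bad_access(addr, size))
 *		reported = kasan_report((unsigned long)addr, size,
 *					is_write, _RET_IP_);
 */
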
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

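/*
 * Rough lifecycle sketch (an outline under stated assumptions, not the
 * exact vmalloc call sites): shadow for a new area is allocated with
 * kasan_populate_vmalloc(), marked addressable with
 * kasan_unpoison_vmalloc(), re-poisoned with kasan_poison_vmalloc() on
 * free, and the backing shadow pages are reclaimed with
 * kasan_release_vmalloc() once an entire free region is drained.
 */
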
#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

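/*
 * Usage sketch (modeled on a typical arch module_alloc(); details vary by
 * architecture): the module area is vmalloc'ed first, then backed with
 * real shadow, and freed if that fails:
 *
 *	p = __vmalloc_node_range(...);
 *	if (p && kasan_module_alloc(p, size) < 0) {
 *		vfree(p);
 *		return NULL;
 *	}
 */
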
#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */