1 // SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core hardware tag-based KASAN code.
 *
 * Copyright (c) 2020 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 */
9 #define pr_fmt(fmt) "kasan: " fmt
11 #include <linux/init.h>
12 #include <linux/kasan.h>
13 #include <linux/kernel.h>
14 #include <linux/memory.h>
16 #include <linux/static_key.h>
17 #include <linux/string.h>
18 #include <linux/types.h>
29 KASAN_ARG_MODE_DEFAULT,
35 enum kasan_arg_vmalloc {
36 KASAN_ARG_VMALLOC_DEFAULT,
37 KASAN_ARG_VMALLOC_OFF,
41 static enum kasan_arg kasan_arg __ro_after_init;
42 static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
43 static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;
/*
 * Whether KASAN is enabled at all.
 * The value remains false until KASAN is initialized by kasan_init_hw_tags().
 */
49 DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
50 EXPORT_SYMBOL(kasan_flag_enabled);
/*
 * Whether the selected mode is synchronous, asynchronous, or asymmetric.
 * Defaults to KASAN_MODE_SYNC.
 */
56 enum kasan_mode kasan_mode __ro_after_init;
57 EXPORT_SYMBOL_GPL(kasan_mode);
59 /* Whether to enable vmalloc tagging. */
60 #ifdef CONFIG_KASAN_VMALLOC
61 DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
63 DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc);
65 EXPORT_SYMBOL_GPL(kasan_flag_vmalloc);
67 #define PAGE_ALLOC_SAMPLE_DEFAULT 1
68 #define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3
/*
 * Sampling interval of page_alloc allocation (un)poisoning.
 * Defaults to no sampling.
 */
74 unsigned long kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
/*
 * Minimum order of page_alloc allocations to be affected by sampling.
 * The default value is chosen to match both
 * PAGE_ALLOC_COSTLY_ORDER and SKB_FRAG_PAGE_ORDER.
 */
81 unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
83 DEFINE_PER_CPU(long, kasan_page_alloc_skip);
86 static int __init early_kasan_flag(char *arg)
91 if (!strcmp(arg, "off"))
92 kasan_arg = KASAN_ARG_OFF;
93 else if (!strcmp(arg, "on"))
94 kasan_arg = KASAN_ARG_ON;
100 early_param("kasan", early_kasan_flag);
102 /* kasan.mode=sync/async/asymm */
103 static int __init early_kasan_mode(char *arg)
108 if (!strcmp(arg, "sync"))
109 kasan_arg_mode = KASAN_ARG_MODE_SYNC;
110 else if (!strcmp(arg, "async"))
111 kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
112 else if (!strcmp(arg, "asymm"))
113 kasan_arg_mode = KASAN_ARG_MODE_ASYMM;
119 early_param("kasan.mode", early_kasan_mode);
121 /* kasan.vmalloc=off/on */
122 static int __init early_kasan_flag_vmalloc(char *arg)
127 if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
130 if (!strcmp(arg, "off"))
131 kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF;
132 else if (!strcmp(arg, "on"))
133 kasan_arg_vmalloc = KASAN_ARG_VMALLOC_ON;
139 early_param("kasan.vmalloc", early_kasan_flag_vmalloc);
141 static inline const char *kasan_mode_info(void)
143 if (kasan_mode == KASAN_MODE_ASYNC)
145 else if (kasan_mode == KASAN_MODE_ASYMM)
151 /* kasan.page_alloc.sample=<sampling interval> */
152 static int __init early_kasan_flag_page_alloc_sample(char *arg)
159 rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
163 if (!kasan_page_alloc_sample || kasan_page_alloc_sample > LONG_MAX) {
164 kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
170 early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);
172 /* kasan.page_alloc.sample.order=<minimum page order> */
173 static int __init early_kasan_flag_page_alloc_sample_order(char *arg)
180 rv = kstrtouint(arg, 0, &kasan_page_alloc_sample_order);
184 if (kasan_page_alloc_sample_order > INT_MAX) {
185 kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
191 early_param("kasan.page_alloc.sample.order", early_kasan_flag_page_alloc_sample_order);
194 * kasan_init_hw_tags_cpu() is called for each CPU.
195 * Not marked as __init as a CPU can be hot-plugged after boot.
197 void kasan_init_hw_tags_cpu(void)
200 * There's no need to check that the hardware is MTE-capable here,
201 * as this function is only called for MTE-capable hardware.
205 * If KASAN is disabled via command line, don't initialize it.
206 * When this function is called, kasan_flag_enabled is not yet
207 * set by kasan_init_hw_tags(). Thus, check kasan_arg instead.
209 if (kasan_arg == KASAN_ARG_OFF)
213 * Enable async or asymm modes only when explicitly requested
214 * through the command line.
216 kasan_enable_hw_tags();
219 /* kasan_init_hw_tags() is called once on boot CPU. */
220 void __init kasan_init_hw_tags(void)
222 /* If hardware doesn't support MTE, don't initialize KASAN. */
223 if (!system_supports_mte())
226 /* If KASAN is disabled via command line, don't initialize it. */
227 if (kasan_arg == KASAN_ARG_OFF)
230 switch (kasan_arg_mode) {
231 case KASAN_ARG_MODE_DEFAULT:
232 /* Default is specified by kasan_mode definition. */
234 case KASAN_ARG_MODE_SYNC:
235 kasan_mode = KASAN_MODE_SYNC;
237 case KASAN_ARG_MODE_ASYNC:
238 kasan_mode = KASAN_MODE_ASYNC;
240 case KASAN_ARG_MODE_ASYMM:
241 kasan_mode = KASAN_MODE_ASYMM;
245 switch (kasan_arg_vmalloc) {
246 case KASAN_ARG_VMALLOC_DEFAULT:
247 /* Default is specified by kasan_flag_vmalloc definition. */
249 case KASAN_ARG_VMALLOC_OFF:
250 static_branch_disable(&kasan_flag_vmalloc);
252 case KASAN_ARG_VMALLOC_ON:
253 static_branch_enable(&kasan_flag_vmalloc);
259 /* KASAN is now initialized, enable it. */
260 static_branch_enable(&kasan_flag_enabled);
262 pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n",
264 kasan_vmalloc_enabled() ? "on" : "off",
265 kasan_stack_collection_enabled() ? "on" : "off");
268 #ifdef CONFIG_KASAN_VMALLOC
270 static void unpoison_vmalloc_pages(const void *addr, u8 tag)
272 struct vm_struct *area;
276 * As hardware tag-based KASAN only tags VM_ALLOC vmalloc allocations
277 * (see the comment in __kasan_unpoison_vmalloc), all of the pages
278 * should belong to a single area.
280 area = find_vm_area((void *)addr);
284 for (i = 0; i < area->nr_pages; i++) {
285 struct page *page = area->pages[i];
287 page_kasan_tag_set(page, tag);
291 static void init_vmalloc_pages(const void *start, unsigned long size)
295 for (addr = start; addr < start + size; addr += PAGE_SIZE) {
296 struct page *page = vmalloc_to_page(addr);
298 clear_highpage_kasan_tagged(page);
302 void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
303 kasan_vmalloc_flags_t flags)
306 unsigned long redzone_start, redzone_size;
308 if (!kasan_vmalloc_enabled()) {
309 if (flags & KASAN_VMALLOC_INIT)
310 init_vmalloc_pages(start, size);
311 return (void *)start;
315 * Don't tag non-VM_ALLOC mappings, as:
317 * 1. Unlike the software KASAN modes, hardware tag-based KASAN only
318 * supports tagging physical memory. Therefore, it can only tag a
319 * single mapping of normal physical pages.
320 * 2. Hardware tag-based KASAN can only tag memory mapped with special
321 * mapping protection bits, see arch_vmap_pgprot_tagged().
322 * As non-VM_ALLOC mappings can be mapped outside of vmalloc code,
323 * providing these bits would require tracking all non-VM_ALLOC
326 * Thus, for VM_ALLOC mappings, hardware tag-based KASAN only tags
327 * the first virtual mapping, which is created by vmalloc().
328 * Tagging the page_alloc memory backing that vmalloc() allocation is
329 * skipped, see ___GFP_SKIP_KASAN.
331 * For non-VM_ALLOC allocations, page_alloc memory is tagged as usual.
333 if (!(flags & KASAN_VMALLOC_VM_ALLOC)) {
334 WARN_ON(flags & KASAN_VMALLOC_INIT);
335 return (void *)start;
339 * Don't tag executable memory.
340 * The kernel doesn't tolerate having the PC register tagged.
342 if (!(flags & KASAN_VMALLOC_PROT_NORMAL)) {
343 WARN_ON(flags & KASAN_VMALLOC_INIT);
344 return (void *)start;
347 tag = kasan_random_tag();
348 start = set_tag(start, tag);
350 /* Unpoison and initialize memory up to size. */
351 kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT);
354 * Explicitly poison and initialize the in-page vmalloc() redzone.
355 * Unlike software KASAN modes, hardware tag-based KASAN doesn't
356 * unpoison memory when populating shadow for vmalloc() space.
358 redzone_start = round_up((unsigned long)start + size,
360 redzone_size = round_up(redzone_start, PAGE_SIZE) - redzone_start;
361 kasan_poison((void *)redzone_start, redzone_size, KASAN_TAG_INVALID,
362 flags & KASAN_VMALLOC_INIT);
365 * Set per-page tag flags to allow accessing physical memory for the
366 * vmalloc() mapping through page_address(vmalloc_to_page()).
368 unpoison_vmalloc_pages(start, tag);
370 return (void *)start;
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	/*
	 * No tagging here.
	 * The physical pages backing the vmalloc() allocation are poisoned
	 * through the usual page_alloc paths.
	 */
}
384 void kasan_enable_hw_tags(void)
386 if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
387 hw_enable_tag_checks_async();
388 else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM)
389 hw_enable_tag_checks_asymm();
391 hw_enable_tag_checks_sync();
394 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
396 EXPORT_SYMBOL_GPL(kasan_enable_hw_tags);
/*
 * Force a pending asynchronous tag fault to be reported; exported for the
 * KASAN KUnit tests (see the CONFIG_KASAN_KUNIT_TEST guard above).
 */
void kasan_force_async_fault(void)
{
	hw_force_async_tag_fault();
}
EXPORT_SYMBOL_GPL(kasan_force_async_fault);