// SPDX-License-Identifier: GPL-2.0
/*
 * This code fills the used part of the kernel stack with a poison value
 * before returning to userspace. It's part of the STACKLEAK feature
 * ported from grsecurity/PaX.
 *
 * Author: Alexander Popov <alex.popov@linux.com>
 *
 * STACKLEAK reduces the information which kernel stack leak bugs can
 * reveal and blocks some uninitialized stack variable attacks.
 */

#include <linux/stackleak.h>
#include <linux/kprobes.h>

#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
#include <linux/jump_label.h>
#include <linux/sysctl.h>
#include <linux/init.h>

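/*
 * When this static key is enabled, the erase entry points below return
 * early and no poisoning is done. It is false by default, so stack
 * erasing is active unless explicitly switched off at runtime.
 */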
static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);

#ifdef CONFIG_SYSCTL
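/*
 * Sysctl handler for 'kernel.stack_erasing'. Reads report whether erasing
 * is currently active; writing 0 or 1 flips the bypass static key
 * accordingly and logs the new state.
 */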
static int stack_erasing_sysctl(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int ret = 0;
        int state = !static_branch_unlikely(&stack_erasing_bypass);
        int prev_state = state;

        table->data = &state;
        table->maxlen = sizeof(int);
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        state = !!state;
        if (ret || !write || state == prev_state)
                return ret;

        if (state)
                static_branch_disable(&stack_erasing_bypass);
        else
                static_branch_enable(&stack_erasing_bypass);

        pr_warn("stackleak: kernel stack erasing is %s\n",
                                        state ? "enabled" : "disabled");
        return ret;
}
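/*
 * Expose the knob as /proc/sys/kernel/stack_erasing (root-only, mode 0600),
 * registered from a late initcall below.
 */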
static struct ctl_table stackleak_sysctls[] = {
        {
                .procname       = "stack_erasing",
                .data           = NULL,
                .maxlen         = sizeof(int),
                .mode           = 0600,
                .proc_handler   = stack_erasing_sysctl,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        {}
};

static int __init stackleak_sysctls_init(void)
{
        register_sysctl_init("kernel", stackleak_sysctls);
        return 0;
}
late_initcall(stackleak_sysctls_init);
#endif /* CONFIG_SYSCTL */

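/*
 * With runtime disabling configured, skip_erasing() reads the bypass key;
 * otherwise it is a constant 'false' and the check compiles away, making
 * erasing unconditional.
 */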
#define skip_erasing()  static_branch_unlikely(&stack_erasing_bypass)
#else
#define skip_erasing()  false
#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */

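/*
 * Common helper for the entry points below: find the boundary between the
 * still-poisoned and the used part of the task stack, overwrite the used
 * part with STACKLEAK_POISON up to 'erase_high', and reset the tracking
 * state for the next syscall.
 */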
static __always_inline void __stackleak_erase(bool on_task_stack)
{
        const unsigned long task_stack_low = stackleak_task_low_bound(current);
        const unsigned long task_stack_high = stackleak_task_high_bound(current);
        unsigned long erase_low, erase_high;

        erase_low = stackleak_find_top_of_poison(task_stack_low,
                                                 current->lowest_stack);

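        /*
         * Record where this erase pass starts so the STACKLEAK metrics
         * interface can report it.
         */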
#ifdef CONFIG_STACKLEAK_METRICS
        current->prev_lowest_stack = erase_low;
#endif

        /*
         * Write poison to the task's stack between 'erase_low' and
         * 'erase_high'.
         *
         * If we're running on a different stack (e.g. an entry trampoline
         * stack) we can erase everything below the pt_regs at the top of the
         * task stack.
         *
         * If we're running on the task stack itself, we must not clobber any
         * stack used by this function and its caller. We assume that this
         * function has a fixed-size stack frame, and the current stack pointer
         * doesn't change while we write poison.
         */
        if (on_task_stack)
                erase_high = current_stack_pointer;
        else
                erase_high = task_stack_high;

        while (erase_low < erase_high) {
                *(unsigned long *)erase_low = STACKLEAK_POISON;
                erase_low += sizeof(unsigned long);
        }

        /* Reset the 'lowest_stack' value for the next syscall */
        current->lowest_stack = task_stack_high;
}

/*
 * Erase and poison the portion of the task stack used since the last erase.
 * Can be called from the task stack or an entry stack when the task stack is
 * no longer in use.
 */
asmlinkage void noinstr stackleak_erase(void)
{
        if (skip_erasing())
                return;

        __stackleak_erase(on_thread_stack());
}

/*
 * Erase and poison the portion of the task stack used since the last erase.
 * Can only be called from the task stack.
 */
asmlinkage void noinstr stackleak_erase_on_task_stack(void)
{
        if (skip_erasing())
                return;

        __stackleak_erase(true);
}

/*
 * Erase and poison the portion of the task stack used since the last erase.
 * Can only be called from a stack other than the task stack.
 */
asmlinkage void noinstr stackleak_erase_off_task_stack(void)
{
        if (skip_erasing())
                return;

        __stackleak_erase(false);
}

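/*
 * Called by code instrumented by the STACKLEAK gcc plugin in functions with
 * a large enough stack frame. Records the lowest (deepest) stack pointer
 * seen in current->lowest_stack so that the next erase knows how far down
 * the stack was used.
 */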
void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
{
        unsigned long sp = current_stack_pointer;

        /*
         * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
         * STACKLEAK_SEARCH_DEPTH makes the poison search in
         * stackleak_erase() unreliable. Let's prevent that.
         */
        BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);

        /* 'lowest_stack' should be aligned on the register width boundary */
        sp = ALIGN(sp, sizeof(unsigned long));
        if (sp < current->lowest_stack &&
            sp >= stackleak_task_low_bound(current)) {
                current->lowest_stack = sp;
        }
}
EXPORT_SYMBOL(stackleak_track_stack);