// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN hooks for kernel subsystems.
 *
 * These functions handle creation of KMSAN metadata for memory allocations.
 *
 * Copyright (C) 2018-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <linux/cacheflush.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>

#include "../internal.h"
#include "../slab.h"
#include "kmsan.h"

/*
 * Instrumented functions shouldn't be called under
 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this would cause the
 * effects of functions like memset() inside instrumented code to be skipped.
 */

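/*
 * Set up the per-task KMSAN state for a newly created task. The actual
 * initialization is done by kmsan_internal_task_create(); the runtime is
 * entered so that hooks triggered by instrumented callees become no-ops
 * instead of recursing into KMSAN.
 */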
void kmsan_task_create(struct task_struct *task)
{
	kmsan_enter_runtime();
	kmsan_internal_task_create(task);
	kmsan_leave_runtime();
}

void kmsan_task_exit(struct task_struct *task)
{
	struct kmsan_ctx *ctx = &task->kmsan_ctx;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ctx->allow_reporting = false;
}

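/*
 * Called after a slab object has been allocated. Objects allocated with
 * __GFP_ZERO are known to be initialized, so their shadow is cleared
 * (unpoisoned); all other objects are poisoned so that reads preceding
 * initialization get reported.
 */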
void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
	if (unlikely(object == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * There's a ctor or this is an RCU cache: do nothing. The memory
	 * status hasn't changed since its last use.
	 */
	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory(object, s->object_size,
					       KMSAN_POISON_CHECK);
	else
		kmsan_internal_poison_memory(object, s->object_size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

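/*
 * Called before a slab object is freed. Freed memory is poisoned so that
 * later use-after-free accesses are reported, unless the cache semantics
 * require the contents to stay intact (RCU caches, caches with constructors).
 */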
void kmsan_slab_free(struct kmem_cache *s, void *object)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	/* RCU slabs may legally be used after free within the RCU grace period. */
	if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
		return;
	/*
	 * If there's a constructor, freed memory must remain in the same state
	 * until the next allocation. We cannot save its state to detect
	 * use-after-free bugs; instead we just keep it unpoisoned.
	 */
	if (s->ctor)
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

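/*
 * kmalloc() requests too large for the slab allocator are backed directly by
 * the page allocator. These two hooks mirror kmsan_slab_alloc() and
 * kmsan_slab_free() for such allocations.
 */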
void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	if (unlikely(ptr == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory((void *)ptr, size,
					       /*checked*/ true);
	else
		kmsan_internal_poison_memory((void *)ptr, size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

void kmsan_kfree_large(const void *ptr)
{
	struct page *page;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	page = virt_to_head_page((void *)ptr);
	KMSAN_WARN_ON(ptr != page_address(page));
	kmsan_internal_poison_memory((void *)ptr,
				     PAGE_SIZE << compound_order(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

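/*
 * Helpers translating an address in the vmalloc range into the address of its
 * KMSAN shadow or origin counterpart (0 if there is no metadata). Both are
 * thin wrappers around kmsan_get_metadata().
 */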
static unsigned long vmalloc_shadow(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_SHADOW);
}

static unsigned long vmalloc_origin(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_ORIGIN);
}

void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
	__vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
	__vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
}

/*
 * This function creates new shadow/origin pages for the physical pages mapped
 * into virtual memory. If those physical pages already had shadow/origin
 * pages, those are ignored.
 */
void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
			      phys_addr_t phys_addr, pgprot_t prot,
			      unsigned int page_shift)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
	struct page *shadow, *origin;
	unsigned long off = 0;
	int nr;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
		shadow = alloc_pages(gfp_mask, 1);
		origin = alloc_pages(gfp_mask, 1);
		__vmap_pages_range_noflush(
			vmalloc_shadow(start + off),
			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
			PAGE_SHIFT);
		__vmap_pages_range_noflush(
			vmalloc_origin(start + off),
			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
			PAGE_SHIFT);
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
}

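/*
 * Counterpart of kmsan_ioremap_page_range(): unmaps the shadow/origin pages
 * for the given range and returns them to the page allocator.
 */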
void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
{
	unsigned long v_shadow, v_origin;
	struct page *shadow, *origin;
	int nr;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	v_shadow = (unsigned long)vmalloc_shadow(start);
	v_origin = (unsigned long)vmalloc_origin(start);
	for (int i = 0; i < nr;
	     i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
		shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
		origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
		__vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
		__vunmap_range_noflush(v_origin, vmalloc_origin(end));
		if (shadow)
			__free_pages(shadow, 1);
		if (origin)
			__free_pages(origin, 1);
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
}

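/*
 * @to_copy is the number of bytes the caller attempted to copy to userspace;
 * @left is the number of bytes that could not be copied, as reported by the
 * copy routine, so "to_copy - left" bytes were actually written. A sketch of
 * a hypothetical call site (for illustration only):
 *
 *	left = raw_copy_to_user(to, from, len);
 *	kmsan_copy_to_user(to, from, len, left);
 */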
void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
			size_t left)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * At this point we've copied the memory already. It's hard to check it
	 * before copying, as the size of the actually copied buffer is unknown.
	 */

	/* copy_to_user() may copy zero bytes. No need to check. */
	if (!to_copy)
		return;
	/* Or maybe copy_to_user() failed to copy anything. */
	if (to_copy <= left)
		return;

	ua_flags = user_access_save();
	if ((u64)to < TASK_SIZE) {
		/* This is a user memory access, check it. */
		kmsan_internal_check_memory((void *)from, to_copy - left, to,
					    REASON_COPY_TO_USER);
	} else {
		/*
		 * Otherwise this is a kernel memory access. This happens when a
		 * compat syscall passes an argument allocated on the kernel
		 * stack to a real syscall.
		 * Don't check anything, just copy the shadow of the copied
		 * bytes.
		 */
		kmsan_internal_memmove_metadata((void *)to, (void *)from,
						to_copy - left);
	}
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_copy_to_user);

/*
 * Handle a USB request block: for outgoing URBs, check that the transfer
 * buffer is fully initialized; for incoming URBs, unpoison it, as the device
 * will fill it in.
 */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
	if (!urb)
		return;
	if (is_out)
		kmsan_internal_check_memory(urb->transfer_buffer,
					    urb->transfer_buffer_length,
					    /*user_addr*/ 0, REASON_SUBMIT_URB);
	else
		kmsan_internal_unpoison_memory(urb->transfer_buffer,
					       urb->transfer_buffer_length,
					       /*checked*/ false);
}

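/*
 * Handle one page of a DMA transfer. Memory sent to the device
 * (DMA_TO_DEVICE) must be fully initialized, so it is checked; memory written
 * by the device (DMA_FROM_DEVICE) becomes initialized from KMSAN's point of
 * view, so it is unpoisoned. DMA_BIDIRECTIONAL does both.
 */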
static void kmsan_handle_dma_page(const void *addr, size_t size,
				  enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
					    REASON_ANY);
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_TO_DEVICE:
		kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
					    REASON_ANY);
		break;
	case DMA_FROM_DEVICE:
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_NONE:
		break;
	}
}

/* Helper function to handle DMA data transfers. */
void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
		      enum dma_data_direction dir)
{
	u64 page_offset, to_go, addr;

	if (PageHighMem(page))
		return;
	addr = (u64)page_address(page) + offset;
	/*
	 * The kernel may occasionally give us adjacent DMA pages not belonging
	 * to the same allocation. Process them separately to avoid triggering
	 * internal KMSAN checks.
	 */
	while (size > 0) {
		page_offset = addr % PAGE_SIZE;
		to_go = min(PAGE_SIZE - page_offset, (u64)size);
		kmsan_handle_dma_page((void *)addr, to_go, dir);
		addr += to_go;
		size -= to_go;
	}
}

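/*
 * Scatter-gather counterpart of kmsan_handle_dma(): applies it to each
 * segment of the list. A driver would reach this hook through the DMA API,
 * e.g. (hypothetical sketch, names are illustrative):
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */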
void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
			 enum dma_data_direction dir)
{
	struct scatterlist *item;
	int i;

	for_each_sg(sg, item, nents, i)
		kmsan_handle_dma(sg_page(item), item->offset, item->length,
				 dir);
}

/* Functions from kmsan-checks.h follow. */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	/* Callers may want to poison/unpoison arbitrary memory. */
	kmsan_internal_poison_memory((void *)address, size, flags,
				     KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_poison_memory);

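/*
 * Together with kmsan_poison_memory(), this implements the manual annotations
 * from kmsan-checks.h. A driver receiving data through a channel invisible to
 * KMSAN could mark it initialized like this (hypothetical sketch,
 * read_from_device() is illustrative):
 *
 *	read_from_device(buf, len);
 *	kmsan_unpoison_memory(buf, len);
 */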
void kmsan_unpoison_memory(const void *address, size_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ua_flags = user_access_save();
	kmsan_enter_runtime();
	/* Callers may want to poison/unpoison arbitrary memory. */
	kmsan_internal_unpoison_memory((void *)address, size,
				       KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);

/*
 * Version of kmsan_unpoison_memory() that can be called from within the KMSAN
 * runtime.
 *
 * Non-instrumented IRQ entry functions receive struct pt_regs from assembly
 * code. Those regs need to be unpoisoned, otherwise using them will result in
 * false positives.
 * Using kmsan_unpoison_memory() is not an option in entry code, because the
 * return value of in_task() is inconsistent there; as a result, certain calls
 * to kmsan_unpoison_memory() are ignored. kmsan_unpoison_entry_regs() ensures
 * that the registers are unpoisoned even if kmsan_in_runtime() is true in the
 * early entry code.
 */
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	kmsan_internal_unpoison_memory((void *)regs, sizeof(*regs),
				       KMSAN_POISON_NOCHECK);
	user_access_restore(ua_flags);
}

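/*
 * Immediately report any uses of uninitialized values within the range.
 * Useful for asserting that a buffer is fully initialized before it leaves
 * the kernel's control, e.g. (illustrative):
 *
 *	kmsan_check_memory(skb->data, skb->len);
 */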
void kmsan_check_memory(const void *addr, size_t size)
{
	if (!kmsan_enabled)
		return;
	kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
				    REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);