mm/kasan/quarantine.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list that also tracks the total size of
 * the objects on it.
 */
struct qlist_head {
        struct qlist_node *head;
        struct qlist_node *tail;
        size_t bytes;
        bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }

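/*
 * Illustrative queue lifecycle (a sketch for orientation, not code that
 * runs anywhere in this file):
 *
 *	struct qlist_head q = QLIST_INIT;
 *
 *	qlist_put(&q, &meta->quarantine_link, cache->size);
 *	qlist_free_all(&q, NULL);
 */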
static bool qlist_empty(struct qlist_head *q)
{
        return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
        q->head = q->tail = NULL;
        q->bytes = 0;
}

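/* Append @qlink at the tail of @q in O(1) and account @size bytes. */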
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
                size_t size)
{
        if (unlikely(qlist_empty(q)))
                q->head = qlink;
        else
                q->tail->next = qlink;
        q->tail = qlink;
        qlink->next = NULL;
        q->bytes += size;
}

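/* Splice the whole of @from onto the tail of @to and reinitialize @from. */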
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
        if (unlikely(qlist_empty(from)))
                return;

        if (qlist_empty(to)) {
                *to = *from;
                qlist_init(from);
                return;
        }

        to->tail->next = from->head;
        to->tail = from->tail;
        to->bytes += from->bytes;

        qlist_init(from);
}

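/*
 * Per-cpu queues are capped at 1 MB each. The global queue is an array of
 * batches: at least 1024 of them, or four per configured CPU, whichever is
 * larger, so that individual batches stay reasonably small on big machines
 * (see the quarantine_batch_size computation in kasan_quarantine_reduce()).
 */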
#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
        (1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

#ifdef CONFIG_PREEMPT_RT
struct cpu_shrink_qlist {
        raw_spinlock_t lock;
        struct qlist_head qlist;
};

static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
        .lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
};
#endif

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine has no memory-shrinker integration with the SLAB
 * allocator, so keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
        return virt_to_slab(qlink)->slab_cache;
}

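/*
 * Recover the object address from its quarantine link: the link is embedded
 * in struct kasan_free_meta, which is stored free_meta_offset bytes into
 * the object.
 */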
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
        struct kasan_free_meta *free_info =
                container_of(qlink, struct kasan_free_meta,
                             quarantine_link);

        return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
        void *object = qlink_to_object(qlink, cache);
        struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
        unsigned long flags;

        if (IS_ENABLED(CONFIG_SLAB))
                local_irq_save(flags);

        /*
         * If init_on_free is enabled and KASAN's free metadata is stored in
         * the object, zero the metadata. Otherwise, the object's memory will
         * not be properly zeroed, as KASAN saves the metadata after the slab
         * allocator zeroes the object.
         */
        if (slab_want_init_on_free(cache) &&
            cache->kasan_info.free_meta_offset == 0)
                memzero_explicit(meta, sizeof(*meta));

        /*
         * As the object now gets freed from the quarantine, assume that its
         * free track is no longer valid.
         */
        *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;

        ___cache_free(cache, object, _THIS_IP_);

        if (IS_ENABLED(CONFIG_SLAB))
                local_irq_restore(flags);
}

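/*
 * Free every object on @q. A NULL @cache means the queue holds objects from
 * multiple caches; each object's cache is then looked up via its slab page.
 */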
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
        struct qlist_node *qlink;

        if (unlikely(qlist_empty(q)))
                return;

        qlink = q->head;
        while (qlink) {
                struct kmem_cache *obj_cache =
                        cache ? cache : qlink_to_cache(qlink);
                struct qlist_node *next = qlink->next;

                qlink_free(qlink, obj_cache);
                qlink = next;
        }
        qlist_init(q);
}

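/*
 * Queue @object for delayed freeing. Returns true if the object was taken
 * into quarantine, false if the caller must free it immediately (the object
 * has no free metadata, or this CPU's quarantine is offline).
 *
 * Sketch of the expected call site in the slab free path (simplified, not
 * the verbatim caller):
 *
 *	if (kasan_quarantine_put(cache, object))
 *		return true;	// freeing is deferred to the quarantine
 *	// otherwise free the object right away
 */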
bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
        unsigned long flags;
        struct qlist_head *q;
        struct qlist_head temp = QLIST_INIT;
        struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

        /*
         * If there's no metadata for this object, don't put it into
         * quarantine.
         */
        if (!meta)
                return false;

        /*
         * Note: irqs must stay disabled until after we move the batch to the
         * global quarantine. Otherwise kasan_quarantine_remove_cache() can
         * miss some objects belonging to the cache if they are in our local
         * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
         * at the beginning, which ensures that it either sees the objects in
         * the per-cpu lists or in the global quarantine.
         */
        local_irq_save(flags);

        q = this_cpu_ptr(&cpu_quarantine);
        if (q->offline) {
                local_irq_restore(flags);
                return false;
        }
        qlist_put(q, &meta->quarantine_link, cache->size);
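        /*
         * If the per-cpu queue has overflowed, splice it into the current
         * global batch. Once that batch is large enough, advance the tail
         * index, unless the ring is full (the tail would catch up with the
         * head).
         */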
        if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
                qlist_move_all(q, &temp);

                raw_spin_lock(&quarantine_lock);
                WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
                qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
                if (global_quarantine[quarantine_tail].bytes >=
                                READ_ONCE(quarantine_batch_size)) {
                        int new_tail;

                        new_tail = quarantine_tail + 1;
                        if (new_tail == QUARANTINE_BATCHES)
                                new_tail = 0;
                        if (new_tail != quarantine_head)
                                quarantine_tail = new_tail;
                }
                raw_spin_unlock(&quarantine_lock);
        }

        local_irq_restore(flags);

        return true;
}

void kasan_quarantine_reduce(void)
{
        size_t total_size, new_quarantine_size, percpu_quarantines;
        unsigned long flags;
        int srcu_idx;
        struct qlist_head to_free = QLIST_INIT;

        if (likely(READ_ONCE(quarantine_size) <=
                   READ_ONCE(quarantine_max_size)))
                return;

        /*
         * The srcu critical section ensures that
         * kasan_quarantine_remove_cache() will not miss objects belonging to
         * the cache while they are in our local to_free list. srcu is chosen
         * because (1) it gives us a private grace-period domain that does not
         * interfere with anything else, and (2) it allows synchronize_srcu()
         * to return without waiting if there are no pending read critical
         * sections (which is the expected case).
         */
        srcu_idx = srcu_read_lock(&remove_cache_srcu);
        raw_spin_lock_irqsave(&quarantine_lock, flags);

        /*
         * Update the quarantine size in case of hotplug. Allocate a fraction
         * of the installed memory to the quarantine, minus the per-cpu queue
         * limits.
         */
        total_size = (totalram_pages() << PAGE_SHIFT) /
                QUARANTINE_FRACTION;
        percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
        new_quarantine_size = (total_size < percpu_quarantines) ?
                0 : total_size - percpu_quarantines;
        WRITE_ONCE(quarantine_max_size, new_quarantine_size);
        /* Aim to consume at most half of the slots in the quarantine. */
        WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
                2 * total_size / QUARANTINE_BATCHES));

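        /* Drop the oldest batch: global_quarantine is a FIFO ring. */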
        if (likely(quarantine_size > quarantine_max_size)) {
                qlist_move_all(&global_quarantine[quarantine_head], &to_free);
                WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
                quarantine_head++;
                if (quarantine_head == QUARANTINE_BATCHES)
                        quarantine_head = 0;
        }

        raw_spin_unlock_irqrestore(&quarantine_lock, flags);

        qlist_free_all(&to_free, NULL);
        srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

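/*
 * Move the objects that belong to @cache from @from to @to; all other
 * objects are re-queued on @from in their original order.
 */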
static void qlist_move_cache(struct qlist_head *from,
                                   struct qlist_head *to,
                                   struct kmem_cache *cache)
{
        struct qlist_node *curr;

        if (unlikely(qlist_empty(from)))
                return;

        curr = from->head;
        qlist_init(from);
        while (curr) {
                struct qlist_node *next = curr->next;
                struct kmem_cache *obj_cache = qlink_to_cache(curr);

                if (obj_cache == cache)
                        qlist_put(to, curr, obj_cache->size);
                else
                        qlist_put(from, curr, obj_cache->size);

                curr = next;
        }
}

#ifndef CONFIG_PREEMPT_RT
static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
        struct kmem_cache *cache = arg;
        struct qlist_head to_free = QLIST_INIT;

        qlist_move_cache(q, &to_free, cache);
        qlist_free_all(&to_free, cache);
}
#else
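/*
 * On PREEMPT_RT, objects cannot be freed from this IPI context with
 * interrupts disabled. Instead, stash them on a per-cpu list that
 * kasan_quarantine_remove_cache() drains and frees from process context.
 */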
static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
        struct kmem_cache *cache = arg;
        unsigned long flags;
        struct cpu_shrink_qlist *sq;

        sq = this_cpu_ptr(&shrink_qlist);
        raw_spin_lock_irqsave(&sq->lock, flags);
        qlist_move_cache(q, &sq->qlist, cache);
        raw_spin_unlock_irqrestore(&sq->lock, flags);
}
#endif

static void per_cpu_remove_cache(void *arg)
{
        struct qlist_head *q;

        q = this_cpu_ptr(&cpu_quarantine);
        /*
         * Pairs with the WRITE_ONCE(q->offline, true) in kasan_cpu_offline():
         * skip offline queues so that cpu_quarantine is not corrupted by an
         * interrupt racing with CPU teardown.
         */
        if (READ_ONCE(q->offline))
                return;
        __per_cpu_remove_cache(q, arg);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
        unsigned long flags, i;
        struct qlist_head to_free = QLIST_INIT;

        /*
         * Must be careful to not miss any objects that are being moved from
         * the per-cpu list to the global quarantine in kasan_quarantine_put(),
         * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
         * achieves the first goal, while synchronize_srcu() achieves the
         * second.
         */
        on_each_cpu(per_cpu_remove_cache, cache, 1);

#ifdef CONFIG_PREEMPT_RT
        {
                int cpu;
                struct cpu_shrink_qlist *sq;

                for_each_online_cpu(cpu) {
                        sq = per_cpu_ptr(&shrink_qlist, cpu);
                        raw_spin_lock_irqsave(&sq->lock, flags);
                        qlist_move_cache(&sq->qlist, &to_free, cache);
                        raw_spin_unlock_irqrestore(&sq->lock, flags);
                }
                qlist_free_all(&to_free, cache);
        }
#endif

        raw_spin_lock_irqsave(&quarantine_lock, flags);
        for (i = 0; i < QUARANTINE_BATCHES; i++) {
                if (qlist_empty(&global_quarantine[i]))
                        continue;
                qlist_move_cache(&global_quarantine[i], &to_free, cache);
                /* Scanning the whole quarantine can take a while. */
                raw_spin_unlock_irqrestore(&quarantine_lock, flags);
                cond_resched();
                raw_spin_lock_irqsave(&quarantine_lock, flags);
        }
        raw_spin_unlock_irqrestore(&quarantine_lock, flags);

        qlist_free_all(&to_free, cache);

        synchronize_srcu(&remove_cache_srcu);
}

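/*
 * CPU hotplug callbacks: mark the per-cpu quarantine usable when a CPU comes
 * online, and drain it and mark it offline on teardown so that no further
 * objects are queued on a dead CPU.
 */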
static int kasan_cpu_online(unsigned int cpu)
{
        this_cpu_ptr(&cpu_quarantine)->offline = false;
        return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
        struct qlist_head *q;

        q = this_cpu_ptr(&cpu_quarantine);
        /*
         * Ensure the ordering between the write to q->offline and
         * qlist_free_all(). Otherwise, cpu_quarantine may be corrupted
         * by an interrupt.
         */
        WRITE_ONCE(q->offline, true);
        barrier();
        qlist_free_all(q, NULL);
        return 0;
}

static int __init kasan_cpu_quarantine_init(void)
{
        int ret = 0;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
                                kasan_cpu_online, kasan_cpu_offline);
        if (ret < 0)
                pr_err("kasan cpu quarantine register failed [%d]\n", ret);
        return ret;
}
late_initcall(kasan_cpu_quarantine_init);