/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

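/*
 * Fake signals are sent to straggler tasks on every SIGNALS_TIMEOUT-th
 * unsuccessful completion attempt; see klp_try_complete_transition().
 */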
#define SIGNALS_TIMEOUT 15

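/* the patch being applied or reverted; NULL when no transition is running */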
struct klp_patch *klp_transition_patch;

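/* KLP_PATCHED or KLP_UNPATCHED while a transition runs, else KLP_UNDEFINED */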
static int klp_target_state = KLP_UNDEFINED;

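/* number of unsuccessful completion attempts since the transition started */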
static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
        mutex_lock(&klp_mutex);

        if (klp_transition_patch)
                klp_try_complete_transition();

        mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is an intentionally empty stub; scheduling it on each CPU
 * implements a hard force of synchronize_rcu(), one which synchronizes
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization there. Instead, hard force the sched
 * synchronization.
 *
 * This approach allows RCU functions to be used for manipulating
 * func_stack safely.
 */
static void klp_synchronize_transition(void)
{
        schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
        struct klp_object *obj;
        struct klp_func *func;
        struct task_struct *g, *task;
        unsigned int cpu;

        pr_debug("'%s': completing %s transition\n",
                 klp_transition_patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
                klp_discard_replaced_patches(klp_transition_patch);
                klp_discard_nops(klp_transition_patch);
        }

        if (klp_target_state == KLP_UNPATCHED) {
                /*
                 * All tasks have transitioned to KLP_UNPATCHED so we can now
                 * remove the new functions from the func_stack.
                 */
                klp_unpatch_objects(klp_transition_patch);

                /*
                 * Make sure klp_ftrace_handler() can no longer see functions
                 * from this patch on the ops->func_stack.  Otherwise, after
                 * func->transition gets cleared, the handler may choose a
                 * removed function.
                 */
                klp_synchronize_transition();
        }

        klp_for_each_object(klp_transition_patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = false;

        /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
        if (klp_target_state == KLP_PATCHED)
                klp_synchronize_transition();

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_UNDEFINED;
        }

        klp_for_each_object(klp_transition_patch, obj) {
                if (!klp_is_object_loaded(obj))
                        continue;
                if (klp_target_state == KLP_PATCHED)
                        klp_post_patch_callback(obj);
                else if (klp_target_state == KLP_UNPATCHED)
                        klp_post_unpatch_callback(obj);
        }

        pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        klp_target_state = KLP_UNDEFINED;
        klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
        if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
                return;

        pr_debug("'%s': canceling patching transition, going to unpatch\n",
                 klp_transition_patch->mod->name);

        klp_target_state = KLP_UNPATCHED;
        klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
        /*
         * A variant of synchronize_rcu() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        /*
         * This test_and_clear_tsk_thread_flag() call also serves as a read
         * barrier (smp_rmb) for two cases:
         *
         * 1) Enforce the order of the TIF_PATCH_PENDING read and the
         *    klp_target_state read.  The corresponding write barrier is in
         *    klp_init_transition().
         *
         * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
         *    of func->transition, if klp_ftrace_handler() is called later on
         *    the same CPU.  See __klp_disable_patch().
         */
        if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
                task->patch_state = READ_ONCE(klp_target_state);

        preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
                                struct stack_trace *trace)
{
        unsigned long func_addr, func_size, address;
        struct klp_ops *ops;
        int i;

        for (i = 0; i < trace->nr_entries; i++) {
                address = trace->entries[i];

                if (klp_target_state == KLP_UNPATCHED) {
                        /*
                         * Check for the to-be-unpatched function
                         * (the func itself).
                         */
                        func_addr = (unsigned long)func->new_func;
                        func_size = func->new_size;
                } else {
                        /*
                         * Check for the to-be-patched function
                         * (the previous func).
                         */
                        ops = klp_find_ops(func->old_func);

                        if (list_is_singular(&ops->func_stack)) {
                                /* original function */
                                func_addr = (unsigned long)func->old_func;
                                func_size = func->old_size;
                        } else {
                                /* previously patched function */
                                struct klp_func *prev;

                                prev = list_next_entry(func, stack_node);
                                func_addr = (unsigned long)prev->new_func;
                                func_size = prev->new_size;
                        }
                }

                if (address >= func_addr && address < func_addr + func_size)
                        return -EAGAIN;
        }

        return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
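        /* a single shared stack buffer, protected by the callers' klp_mutex */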
        static unsigned long entries[MAX_STACK_ENTRIES];
        struct stack_trace trace;
        struct klp_object *obj;
        struct klp_func *func;
        int ret;

        trace.skip = 0;
        trace.nr_entries = 0;
        trace.max_entries = MAX_STACK_ENTRIES;
        trace.entries = entries;
        ret = save_stack_trace_tsk_reliable(task, &trace);
        WARN_ON_ONCE(ret == -ENOSYS);
        if (ret) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d has an unreliable stack\n",
                         __func__, task->comm, task->pid);
                return ret;
        }

        klp_for_each_object(klp_transition_patch, obj) {
                if (!obj->patched)
                        continue;
                klp_for_each_func(obj, func) {
                        ret = klp_check_stack_func(func, &trace);
                        if (ret) {
                                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                                         "%s: %s:%d is sleeping on function %s\n",
                                         __func__, task->comm, task->pid,
                                         func->old_name);
                                return ret;
                        }
                }
        }

        return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
        struct rq *rq;
        struct rq_flags flags;
        int ret;
        bool success = false;
        char err_buf[STACK_ERR_BUF_SIZE];

        err_buf[0] = '\0';

        /* check if this task has already switched over */
        if (task->patch_state == klp_target_state)
                return true;

        /*
         * Now try to check the stack for any to-be-patched or to-be-unpatched
         * functions.  If all goes well, switch the task to the target patch
         * state.
         */
        rq = task_rq_lock(task, &flags);

        if (task_running(rq, task) && task != current) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d is running\n", __func__, task->comm,
                         task->pid);
                goto done;
        }

        ret = klp_check_stack(task, err_buf);
        if (ret)
                goto done;

        success = true;

        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        task->patch_state = klp_target_state;

done:
        task_rq_unlock(rq, task, &flags);

        /*
         * Due to console deadlock issues, pr_debug() can't be used while
         * holding the task rq lock.  Instead we have to use a temporary buffer
         * and print the debug message after releasing the lock.
         */
        if (err_buf[0] != '\0')
                pr_debug("%s", err_buf);

        return success;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
        struct task_struct *g, *task;

        if (klp_signals_cnt == SIGNALS_TIMEOUT)
                pr_notice("signaling remaining tasks\n");

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                if (!klp_patch_pending(task))
                        continue;

                /*
                 * There is a small race here. We could see TIF_PATCH_PENDING
                 * set and decide to wake up a kthread or send a fake signal.
                 * Meanwhile the task could migrate itself and the action
                 * would be meaningless. It is not serious though.
                 */
                if (task->flags & PF_KTHREAD) {
                        /*
                         * Wake up a kthread which sleeps interruptibly and
                         * still has not been migrated.
                         */
                        wake_up_state(task, TASK_INTERRUPTIBLE);
                } else {
                        /*
                         * Send fake signal to all non-kthread tasks which are
                         * still not migrated.
                         */
                        spin_lock_irq(&task->sighand->siglock);
                        signal_wake_up(task, 0);
                        spin_unlock_irq(&task->sighand->siglock);
                }
        }
        read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;
        struct klp_patch *patch;
        bool complete = true;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        /*
         * Try to switch the tasks to the target patch state by walking their
         * stacks and looking for any to-be-patched or to-be-unpatched
         * functions.  If such functions are found on a stack, or if the stack
         * is deemed unreliable, the task can't be switched yet.
         *
         * Usually this will transition most (or all) of the tasks on a system
         * unless the patch includes changes to a very common function.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (!klp_try_switch_task(task))
                        complete = false;
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        get_online_cpus();
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (cpu_online(cpu)) {
                        if (!klp_try_switch_task(task))
                                complete = false;
                } else if (task->patch_state != klp_target_state) {
                        /* offline idle tasks can be switched immediately */
                        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
                        task->patch_state = klp_target_state;
                }
        }
        put_online_cpus();

        if (!complete) {
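                /*
                 * Send fake signals periodically: on every SIGNALS_TIMEOUT-th
                 * failed attempt, i.e. roughly every SIGNALS_TIMEOUT seconds
                 * given the HZ retry interval below.
                 */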
                if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
                        klp_send_signals();
                klp_signals_cnt++;

                /*
                 * Some tasks weren't able to be switched over.  Try again
                 * later and/or wait for other methods like kernel exit
                 * switching.
                 */
                schedule_delayed_work(&klp_transition_work,
                                      round_jiffies_relative(HZ));
                return;
        }

        /* we're done, now cleanup the data structures */
        patch = klp_transition_patch;
        klp_complete_transition();

        /*
         * It would make more sense to free the patch in
         * klp_complete_transition(), but it is also called
         * from klp_cancel_transition().
         */
        if (!patch->enabled) {
                klp_free_patch_start(patch);
                schedule_work(&patch->free_work);
        }
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
        struct task_struct *g, *task;
        unsigned int cpu;

        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

        pr_notice("'%s': starting %s transition\n",
                  klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /*
         * Mark all normal tasks as needing a patch state update.  They'll
         * switch either in klp_try_complete_transition() or as they exit the
         * kernel.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        /*
         * Mark all idle tasks as needing a patch state update.  They'll switch
         * either in klp_try_complete_transition() or at the idle loop switch
         * point.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        }

        klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
        struct task_struct *g, *task;
        unsigned int cpu;
        struct klp_object *obj;
        struct klp_func *func;
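        /* KLP_UNPATCHED (0) and KLP_PATCHED (1) are logical complements */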
        int initial_state = !state;

        WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

        klp_transition_patch = patch;

        /*
         * Set the global target patch state which tasks will switch to.  This
         * has no effect until the TIF_PATCH_PENDING flags get set later.
         */
        klp_target_state = state;

        pr_debug("'%s': initializing %s transition\n", patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

        /*
         * Initialize all tasks to the initial patch state to prepare them for
         * switching to the target state.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
                task->patch_state = initial_state;
        }

        /*
         * Enforce the order of the task->patch_state initializations and the
         * func->transition updates to ensure that klp_ftrace_handler() doesn't
         * see a func in transition with a task->patch_state of KLP_UNDEFINED.
         *
         * Also enforce the order of the klp_target_state write and future
         * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
         * set a task->patch_state to KLP_UNDEFINED.
         */
        smp_wmb();

        /*
         * Set the func transition states so klp_ftrace_handler() will know to
         * switch to the transition logic.
         *
         * When patching, the funcs aren't yet in the func_stack and will be
         * made visible to the ftrace handler shortly by the calls to
         * klp_patch_object().
         *
         * When unpatching, the funcs are already in the func_stack and so are
         * already visible to the ftrace handler.
         */
        klp_for_each_object(patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;

        pr_debug("'%s': reversing transition from %s\n",
                 klp_transition_patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
                                                   "unpatching to patching");

        klp_transition_patch->enabled = !klp_transition_patch->enabled;

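        /* flip the target between KLP_PATCHED (1) and KLP_UNPATCHED (0) */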
        klp_target_state = !klp_target_state;

        /*
         * Clear all TIF_PATCH_PENDING flags to prevent races caused by
         * klp_update_patch_state() running in parallel with
         * klp_start_transition().
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

        /* Let any remaining calls to klp_update_patch_state() complete */
        klp_synchronize_transition();

        klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
        child->patch_state = current->patch_state;

        /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on the administrator's request. This
 * forces an existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'. This is not the case here and the consistency model could be
 * broken. The administrator, who is the only one able to trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
        struct klp_patch *patch;
        struct task_struct *g, *task;
        unsigned int cpu;

        pr_warn("forcing remaining tasks to the patched state\n");

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                klp_update_patch_state(task);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                klp_update_patch_state(idle_task(cpu));

        klp_for_each_patch(patch)
                patch->forced = true;
}