/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex
 * protection, except within the following functions which carefully avoid
 * the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);
/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module still can be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;
static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}
static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos))
			return func;
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0)
				return obj;
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}
struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure the symbol is
	 * unique; otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
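/*
 * For example, with sympos == 0 the lookup above succeeds only when the
 * symbol is unique within the object; a caller would pass sympos == 2 to
 * select the second of several identically named (static) symbols, counted
 * in kallsyms order.
 */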
static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
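/*
 * Example of the symbol-name format parsed above: the name
 * ".klp.sym.vmlinux.printk,0" resolves printk in vmlinux, with sympos 0
 * because the symbol is unique. The concrete symbol is only an
 * illustration.
 */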
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
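/*
 * Example of the section-name format handled above: a section named
 * ".klp.rela.ext4.text.unlikely" carries relocations targeting the object
 * "ext4"; only the object-name field is extracted by the sscanf(). The
 * concrete name is only an illustration.
 */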
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
static int __klp_disable_patch(struct klp_patch *patch);
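/*
 * Typical usage from userspace, for a hypothetical patch named
 * "livepatch_sample" (illustration only):
 *
 *	# cat /sys/kernel/livepatch/livepatch_sample/enabled
 *	1
 *	# echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
 *	# echo 1 > /sys/kernel/livepatch/livepatch_sample/force
 *
 * Writing "1" to "force" is only meant to push through a stuck transition;
 * a disabled patch cannot be re-enabled through "enabled".
 */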
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow reversing a pending transition in either direction. It might
	 * be necessary to complete the transition without forcing and
	 * breaking the system integrity.
	 *
	 * Do not allow re-enabling a disabled patch.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static struct klp_object *klp_alloc_object_dynamic(const char *name)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&obj->func_list);
	obj->dynamic = true;

	return obj;
}
static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}
static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);
	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name);
		if (!obj)
			return -ENOMEM;

		list_add_tail(&obj->node, &patch->obj_list);
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;

		list_add_tail(&func->node, &obj->func_list);
	}

	return 0;
}
/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}
static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);

		/* Might be called from klp_init_patch() error path. */
		if (func->kobj_added) {
			kobject_put(&func->kobj);
		} else if (func->nop) {
			klp_free_func_nop(func);
		}
	}
}
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}
static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);

		/* Might be called from klp_init_patch() error path. */
		if (obj->kobj_added) {
			kobject_put(&obj->kobj);
		} else if (obj->dynamic) {
			klp_free_object_dynamic(obj);
		}
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}
/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * afterwards.
 */
void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}
/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(). And it has to be
 * the last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	if (patch->kobj_added) {
		kobject_put(&patch->kobj);
		wait_for_completion(&patch->finish);
	}

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}
/*
 * The livepatch might be freed from the sysfs interface created by the
 * patch. This work item allows the wait for the interface to be destroyed
 * to happen in a separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	int ret;

	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/*
	 * The format for the sysfs directory is <function,sympos> where
	 * sympos is the nth occurrence of this symbol in kallsyms for the
	 * patched object. If the user selects 0 for old_sympos, then 1 will
	 * be used since a unique symbol will be the first occurrence.
	 */
	ret = kobject_init_and_add(&func->kobj, &klp_ktype_func,
				   &obj->kobj, "%s,%lu", func->old_name,
				   func->old_sympos ? func->old_sympos : 1);
	if (!ret)
		func->kobj_added = true;

	return ret;
}
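/*
 * For example, patching the second of two identically named static
 * functions "seq_show" would create the sysfs directory
 * "<patch>/<object>/seq_show,2". The function name is hypothetical.
 */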
/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}
/* Parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	obj->kobj_added = true;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}
static int klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	if (!patch->objs)
		return -EINVAL;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	patch->kobj_added = false;
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;

		INIT_LIST_HEAD(&obj->func_list);
		obj->kobj_added = false;
		list_add_tail(&obj->node, &patch->obj_list);

		klp_for_each_func_static(obj, func) {
			func->kobj_added = false;
			list_add_tail(&func->node, &obj->func_list);
		}
	}

	if (!try_module_get(patch->mod))
		return -ENODEV;

	return 0;
}
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	patch->kobj_added = true;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	if (!patch->kobj_added)
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}
/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
		return -EOPNOTSUPP;
	}

	mutex_lock(&klp_mutex);

	ret = klp_init_patch_early(patch);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
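/*
 * A minimal caller sketch, modeled on samples/livepatch/livepatch-sample.c;
 * the patched function and its replacement are illustrative. An object
 * with a NULL name targets vmlinux.
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 */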
/*
 * This function removes replaced patches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessible. All functions are redirected
 * by the klp_transition_patch. They either use the new code or run the
 * original code thanks to the special 'nop' function patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. Also
 * this is handled transparently by patch->module_put.
 */
void klp_discard_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
		klp_free_patch_start(old_patch);
		schedule_work(&old_patch->free_work);
	}
}
/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding an unnecessary delay in the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}
/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}
void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}
static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);