Merge tag 'iommu-updates-v5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro...
[linux-2.6-microblaze.git] / kernel/livepatch/transition.c
index 304d5eb..9c89ae8 100644
 #define MAX_STACK_ENTRIES  100
 #define STACK_ERR_BUF_SIZE 128
 
+#define SIGNALS_TIMEOUT 15
+
 struct klp_patch *klp_transition_patch;
 
 static int klp_target_state = KLP_UNDEFINED;
 
-static bool klp_forced = false;
+static unsigned int klp_signals_cnt;
 
 /*
  * This work can be performed periodically to finish patching or unpatching any
@@ -87,6 +89,11 @@ static void klp_complete_transition(void)
                 klp_transition_patch->mod->name,
                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
+       if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
+               klp_discard_replaced_patches(klp_transition_patch);
+               klp_discard_nops(klp_transition_patch);
+       }
+
        if (klp_target_state == KLP_UNPATCHED) {
                /*
                 * All tasks have transitioned to KLP_UNPATCHED so we can now
@@ -136,13 +143,6 @@ static void klp_complete_transition(void)
        pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
-       /*
-        * klp_forced set implies unbounded increase of module's ref count if
-        * the module is disabled/enabled in a loop.
-        */
-       if (!klp_forced && klp_target_state == KLP_UNPATCHED)
-               module_put(klp_transition_patch->mod);
-
        klp_target_state = KLP_UNDEFINED;
        klp_transition_patch = NULL;
 }
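
Two things change in klp_complete_transition() here: when an atomic-replace patch (->replace set) finishes patching, the patches it obsoletes and the no-op functions created for the transition are discarded, and the klp_forced-guarded module_put() disappears from this function (the per-patch ->forced flag introduced at the end of this diff takes over the role of the removed global). A minimal userspace sketch of the new completion-time decision; struct klp_patch, the KLP_* values and the two puts() lines are stand-ins for the kernel's types, klp_discard_replaced_patches() and klp_discard_nops():

    /* model of the completion-time cleanup decision (stand-in types) */
    #include <stdbool.h>
    #include <stdio.h>

    enum klp_state { KLP_UNDEFINED = -1, KLP_UNPATCHED, KLP_PATCHED };

    struct klp_patch { bool replace; };

    static void complete(struct klp_patch *p, enum klp_state target)
    {
            /* An atomic-replace patch that just landed obsoletes every
             * previously installed patch and the transition no-ops. */
            if (p->replace && target == KLP_PATCHED) {
                    puts("discard replaced patches");
                    puts("discard transition no-ops");
            }
    }

    int main(void)
    {
            struct klp_patch atomic_replace = { .replace = true };
            complete(&atomic_replace, KLP_PATCHED);
            return 0;
    }
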
@@ -224,11 +224,11 @@ static int klp_check_stack_func(struct klp_func *func,
                         * Check for the to-be-patched function
                         * (the previous func).
                         */
-                       ops = klp_find_ops(func->old_addr);
+                       ops = klp_find_ops(func->old_func);
 
                        if (list_is_singular(&ops->func_stack)) {
                                /* original function */
-                               func_addr = func->old_addr;
+                               func_addr = (unsigned long)func->old_func;
                                func_size = func->old_size;
                        } else {
                                /* previously patched function */
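
The stack-walk check now keys off the func->old_func pointer rather than a separately stored old_addr, casting to unsigned long only where an address is needed. What the check needs is a code range [func_addr, func_addr + func_size) to search for on each sleeping task's stack; which range depends on whether this is the first patch of the function (singular ops->func_stack) or a re-patch. A standalone model of that selection, with klp_ops and the list_is_singular() test reduced to a plain flag:

    /* model of the range selection; the list handling is elided */
    #include <stdio.h>

    struct klp_func {
            void *old_func;         /* pointer, replacing the old old_addr */
            unsigned long old_size;
    };

    static void pick_range(struct klp_func *func, int first_patch)
    {
            unsigned long func_addr, func_size;

            if (first_patch) {
                    /* original function */
                    func_addr = (unsigned long)func->old_func;
                    func_size = func->old_size;
            } else {
                    /* previously patched: the prior new_func range would
                     * be used instead (omitted in this sketch) */
                    func_addr = func_size = 0;
            }
            printf("search [%#lx, %#lx)\n", func_addr, func_addr + func_size);
    }

    int main(void)
    {
            static int dummy;       /* stands in for the old function */
            struct klp_func f = { .old_func = &dummy, .old_size = 16 };

            pick_range(&f, 1);
            return 0;
    }
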
@@ -347,6 +347,47 @@ done:
 
 }
 
+/*
+ * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
+ * Kthreads with TIF_PATCH_PENDING set are woken up.
+ */
+static void klp_send_signals(void)
+{
+       struct task_struct *g, *task;
+
+       if (klp_signals_cnt == SIGNALS_TIMEOUT)
+               pr_notice("signaling remaining tasks\n");
+
+       read_lock(&tasklist_lock);
+       for_each_process_thread(g, task) {
+               if (!klp_patch_pending(task))
+                       continue;
+
+               /*
+                * There is a small race here. We could see TIF_PATCH_PENDING
+                * set and decide to wake up a kthread or send a fake signal.
+                * Meanwhile the task could migrate itself and the action
+                * would be meaningless. It is not serious though.
+                */
+               if (task->flags & PF_KTHREAD) {
+                       /*
+                        * Wake up a kthread which sleeps interruptibly and
+                        * has not yet been migrated.
+                        */
+                       wake_up_state(task, TASK_INTERRUPTIBLE);
+               } else {
+                       /*
+                        * Send a fake signal to all non-kthread tasks which
+                        * have not yet been migrated.
+                        */
+                       spin_lock_irq(&task->sighand->siglock);
+                       signal_wake_up(task, 0);
+                       spin_unlock_irq(&task->sighand->siglock);
+               }
+       }
+       read_unlock(&tasklist_lock);
+}
+
 /*
  * Try to switch all remaining tasks to the target patch state by walking the
  * stacks of sleeping tasks and looking for any to-be-patched or
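
The signaling helper moves into transition.c as a static function and loses the "only admin can request this" wording, because it is now invoked automatically from the retry path (next hunk). The mechanism itself is unchanged: kthreads stuck in interruptible sleep are woken so they pass through a migration point, while userspace tasks get a "fake" signal via signal_wake_up(task, 0), which queues no real signal but forces the task through the signal-handling slow path, where TIF_PATCH_PENDING is resolved on the way back to userspace. A userspace model of the per-task decision; struct task and the printed actions are stand-ins:

    /* model of the per-task decision in klp_send_signals() */
    #include <stdbool.h>
    #include <stdio.h>

    #define PF_KTHREAD 0x00200000           /* same bit the kernel uses */

    struct task {
            const char *comm;
            unsigned int flags;
            bool patch_pending;             /* models TIF_PATCH_PENDING */
    };

    static void nudge(struct task *t)
    {
            if (!t->patch_pending)
                    return;
            if (t->flags & PF_KTHREAD)
                    printf("%s: wake from interruptible sleep\n", t->comm);
            else
                    printf("%s: fake signal, nothing queued\n", t->comm);
    }

    int main(void)
    {
            struct task tasks[] = {
                    { "kworker/0:1", PF_KTHREAD, true  },
                    { "bash",        0,          true  },
                    { "sshd",        0,          false },    /* migrated */
            };

            for (unsigned int i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
                    nudge(&tasks[i]);
            return 0;
    }
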
@@ -359,6 +400,7 @@ void klp_try_complete_transition(void)
 {
        unsigned int cpu;
        struct task_struct *g, *task;
+       struct klp_patch *patch;
        bool complete = true;
 
        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
@@ -396,6 +438,10 @@ void klp_try_complete_transition(void)
        put_online_cpus();
 
        if (!complete) {
+               if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
+                       klp_send_signals();
+               klp_signals_cnt++;
+
                /*
                 * Some tasks weren't able to be switched over.  Try again
                 * later and/or wait for other methods like kernel exit
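
klp_try_complete_transition() is re-run periodically by the transition work (see the comment near the top of the file) until every task has migrated. The new counter turns those retries into a signaling cadence: klp_signals_cnt starts at 0 (reset in klp_start_transition() below) and is bumped on every failed attempt, so klp_send_signals() fires on every SIGNALS_TIMEOUT-th retry, and its pr_notice() prints only on the first signaling round (klp_signals_cnt == SIGNALS_TIMEOUT). A standalone model of the cadence:

    /* model of the retry/signal cadence */
    #include <stdio.h>

    #define SIGNALS_TIMEOUT 15

    int main(void)
    {
            unsigned int klp_signals_cnt = 0;

            for (int attempt = 1; attempt <= 46; attempt++) {
                    /* pretend every attempt finds an unmigrated task */
                    if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
                            printf("attempt %d: signal round\n", attempt);
                    klp_signals_cnt++;
            }
            return 0;
    }

This prints signal rounds on attempts 16, 31 and 46, i.e. once every 15 failed attempts after the first.
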
@@ -407,7 +453,18 @@ void klp_try_complete_transition(void)
        }
 
        /* we're done, now cleanup the data structures */
+       patch = klp_transition_patch;
        klp_complete_transition();
+
+       /*
+        * It would make more sense to free the patch in
+        * klp_complete_transition(), but it is also called
+        * from klp_cancel_transition().
+        */
+       if (!patch->enabled) {
+               klp_free_patch_start(patch);
+               schedule_work(&patch->free_work);
+       }
 }
 
 /*
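
Once the transition completes, a patch that ended up disabled (the normal outcome of a successful unpatch) is now freed from here, split into a synchronous klp_free_patch_start() and a deferred free_work. The in-code comment explains why this lives here rather than in klp_complete_transition(): the latter is also called from klp_cancel_transition(). Note that the pointer is saved first, because klp_complete_transition() clears the klp_transition_patch global. A compressed model of that ordering; queue_deferred_free() is a hypothetical stand-in for the klp_free_patch_start() + schedule_work(&patch->free_work) pair:

    /* model of the save-then-complete-then-free ordering */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct klp_patch { bool enabled; };

    static struct klp_patch *klp_transition_patch;

    static void complete_transition(void)
    {
            /* the real function also resets the per-task state */
            klp_transition_patch = NULL;
    }

    static void queue_deferred_free(struct klp_patch *p)
    {
            printf("free deferred to workqueue (enabled=%d)\n", p->enabled);
    }

    int main(void)
    {
            static struct klp_patch unpatched = { .enabled = false };

            klp_transition_patch = &unpatched;

            /* save the pointer first: completing clears the global */
            struct klp_patch *patch = klp_transition_patch;
            complete_transition();

            if (!patch->enabled)
                    queue_deferred_free(patch);
            return 0;
    }
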
@@ -446,6 +503,8 @@ void klp_start_transition(void)
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        }
+
+       klp_signals_cnt = 0;
 }
 
 /*
@@ -568,47 +627,6 @@ void klp_copy_process(struct task_struct *child)
        /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
 }
 
-/*
- * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
- * Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request this
- * action currently.
- */
-void klp_send_signals(void)
-{
-       struct task_struct *g, *task;
-
-       pr_notice("signaling remaining tasks\n");
-
-       read_lock(&tasklist_lock);
-       for_each_process_thread(g, task) {
-               if (!klp_patch_pending(task))
-                       continue;
-
-               /*
-                * There is a small race here. We could see TIF_PATCH_PENDING
-                * set and decide to wake up a kthread or send a fake signal.
-                * Meanwhile the task could migrate itself and the action
-                * would be meaningless. It is not serious though.
-                */
-               if (task->flags & PF_KTHREAD) {
-                       /*
-                        * Wake up a kthread which sleeps interruptedly and
-                        * still has not been migrated.
-                        */
-                       wake_up_state(task, TASK_INTERRUPTIBLE);
-               } else {
-                       /*
-                        * Send fake signal to all non-kthread tasks which are
-                        * still not migrated.
-                        */
-                       spin_lock_irq(&task->sighand->siglock);
-                       signal_wake_up(task, 0);
-                       spin_unlock_irq(&task->sighand->siglock);
-               }
-       }
-       read_unlock(&tasklist_lock);
-}
-
 /*
  * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
  * existing transition to finish.
@@ -620,6 +638,7 @@ void klp_send_signals(void)
  */
 void klp_force_transition(void)
 {
+       struct klp_patch *patch;
        struct task_struct *g, *task;
        unsigned int cpu;
 
@@ -633,5 +652,6 @@ void klp_force_transition(void)
        for_each_possible_cpu(cpu)
                klp_update_patch_state(idle_task(cpu));
 
-       klp_forced = true;
+       klp_for_each_patch(patch)
+               patch->forced = true;
 }
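
Finally, the single global klp_forced latch becomes a per-patch flag. Forcing a transition abandons the consistency model, so the guarantee is void for every patch currently installed, not just the one in transition; marking them all ->forced lets later removal code skip the module_put() that the comment deleted earlier in this diff warned about, since a forced patch module's reference count can no longer be trusted. A small model of the list-wide marking; the hand-rolled list stands in for klp_for_each_patch():

    /* model of marking every installed patch as forced */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct klp_patch {
            const char *name;
            bool forced;
            struct klp_patch *next;
    };

    static struct klp_patch p2 = { "p2", false, NULL };
    static struct klp_patch p1 = { "p1", false, &p2  };

    int main(void)
    {
            /* forcing voids the consistency guarantee for every
             * installed patch, not just the one in transition */
            for (struct klp_patch *p = &p1; p; p = p->next)
                    p->forced = true;

            for (struct klp_patch *p = &p1; p; p = p->next)
                    printf("%s: forced=%d, skip module_put on removal\n",
                           p->name, p->forced);
            return 0;
    }
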