diff --git a/kernel/kthread.c b/kernel/kthread.c
index fe3f2a4..0fccf7d 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1093,8 +1093,38 @@ void kthread_flush_work(struct kthread_work *work)
 EXPORT_SYMBOL_GPL(kthread_flush_work);
 
 /*
- * This function removes the work from the worker queue. Also it makes sure
- * that it won't get queued later via the delayed work's timer.
+ * Make sure that the timer is neither set nor running and can
+ * no longer manipulate the work's list_head.
+ *
+ * The function is called under worker->lock. The lock is temporarily
+ * released, but the timer cannot be set again in the meantime.
+ */
+static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
+                                             unsigned long *flags)
+{
+       struct kthread_delayed_work *dwork =
+               container_of(work, struct kthread_delayed_work, work);
+       struct kthread_worker *worker = work->worker;
+
+       /*
+        * del_timer_sync() must be called to make sure that the timer
+        * callback is not running. The lock must be temporarily released
+        * to avoid a deadlock with the callback. In the meantime,
+        * any queuing is blocked by setting the canceling counter.
+        */
+       work->canceling++;
+       raw_spin_unlock_irqrestore(&worker->lock, *flags);
+       del_timer_sync(&dwork->timer);
+       raw_spin_lock_irqsave(&worker->lock, *flags);
+       work->canceling--;
+}
+
+/*
+ * This function removes the work from the worker queue.
+ *
+ * It is called under worker->lock. The caller must make sure that
+ * the timer used by delayed work is not running, e.g. by calling
+ * kthread_cancel_delayed_work_timer().
  *
  * The work might still be in use when this function finishes. See the
  * current_work processed by the worker.
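
For context, a minimal user of the kthread delayed-work API that these helpers serve could look like the sketch below. It is illustrative only and not part of the patch: the demo_* identifiers, the "demo_kworker" name and the HZ delay are invented for the example.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static struct kthread_worker *demo_worker;      /* hypothetical worker */
static struct kthread_delayed_work demo_dwork;  /* hypothetical work item */

static void demo_fn(struct kthread_work *work)
{
        pr_info("demo: delayed work ran\n");
}

static int demo_start(void)
{
        demo_worker = kthread_create_worker(0, "demo_kworker");
        if (IS_ERR(demo_worker))
                return PTR_ERR(demo_worker);

        kthread_init_delayed_work(&demo_dwork, demo_fn);
        kthread_queue_delayed_work(demo_worker, &demo_dwork, HZ);
        return 0;
}

static void demo_stop(void)
{
        /*
         * Ends up in __kthread_cancel_work_sync() and, because this is
         * delayed work, in kthread_cancel_delayed_work_timer() above.
         */
        kthread_cancel_delayed_work_sync(&demo_dwork);
        kthread_destroy_worker(demo_worker);
}
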
@@ -1102,28 +1132,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work);
  * Return: %true if @work was pending and successfully canceled,
  *     %false if @work was not pending
  */
-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
-                                 unsigned long *flags)
+static bool __kthread_cancel_work(struct kthread_work *work)
 {
-       /* Try to cancel the timer if exists. */
-       if (is_dwork) {
-               struct kthread_delayed_work *dwork =
-                       container_of(work, struct kthread_delayed_work, work);
-               struct kthread_worker *worker = work->worker;
-
-               /*
-                * del_timer_sync() must be called to make sure that the timer
-                * callback is not running. The lock must be temporary released
-                * to avoid a deadlock with the callback. In the meantime,
-                * any queuing is blocked by setting the canceling counter.
-                */
-               work->canceling++;
-               raw_spin_unlock_irqrestore(&worker->lock, *flags);
-               del_timer_sync(&dwork->timer);
-               raw_spin_lock_irqsave(&worker->lock, *flags);
-               work->canceling--;
-       }
-
        /*
         * Try to remove the work from a worker list. It might either
         * be from worker->work_list or from worker->delayed_work_list.
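
With the timer handling split out, the rest of __kthread_cancel_work() (only its leading comment shows up as context in this hunk) reduces to the plain list removal. A sketch of the resulting function, reconstructed from the context above:

static bool __kthread_cancel_work(struct kthread_work *work)
{
        /*
         * Try to remove the work from a worker list. It might either
         * be from worker->work_list or from worker->delayed_work_list.
         */
        if (!list_empty(&work->node)) {
                list_del_init(&work->node);
                return true;
        }

        return false;
}
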
@@ -1176,11 +1186,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
        /* Work must not be used with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker != worker);
 
-       /* Do not fight with another command that is canceling this work. */
+       /*
+        * Temporarily cancel the work, but do not fight with another
+        * command that is canceling the work as well.
+        *
+        * It is a bit tricky because of possible races with other
+        * mod_delayed_work() and cancel_delayed_work() callers.
+        *
+        * The timer must be canceled first because worker->lock is released
+        * when doing so. But the work can be removed from the queue (list)
+        * only when it can be queued again so that the return value can
+        * be used for reference counting.
+        */
+       kthread_cancel_delayed_work_timer(work, &flags);
        if (work->canceling)
                goto out;
+       ret = __kthread_cancel_work(work);
 
-       ret = __kthread_cancel_work(work, true, &flags);
 fast_queue:
        __kthread_queue_delayed_work(worker, dwork, delay);
 out:
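
The reference-counting remark in the new comment concerns callers that hold one reference per pending work item and use the return value to tell whether an earlier pend was replaced. A hypothetical caller pattern (the demo_dev structure and the get_device()/put_device() pairing are invented for illustration):

#include <linux/device.h>
#include <linux/kthread.h>

struct demo_dev {
        struct device *dev;
        struct kthread_worker *worker;
        struct kthread_delayed_work dwork;
};

static void demo_rearm(struct demo_dev *d)
{
        get_device(d->dev);             /* reference for the new pending work */

        /*
         * %true means the work was already pending and only its timer was
         * re-armed, so one of the two references held now is extra.
         */
        if (kthread_mod_delayed_work(d->worker, &d->dwork, HZ))
                put_device(d->dev);
}
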
@@ -1202,7 +1224,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
-       ret = __kthread_cancel_work(work, is_dwork, &flags);
+       if (is_dwork)
+               kthread_cancel_delayed_work_timer(work, &flags);
+
+       ret = __kthread_cancel_work(work);
 
        if (worker->current_work != work)
                goto out_fast;
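
For orientation, both synchronous cancel entry points funnel into __kthread_cancel_work_sync(); only the delayed-work variant passes is_dwork=true and therefore reaches the new timer helper. A condensed view of the exported wrappers (kernel-doc comments omitted):

bool kthread_cancel_work_sync(struct kthread_work *work)
{
        return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
        return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);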