// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);
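/*
 * Usage sketch (illustrative, not part of this file): a wait queue head is
 * normally declared statically with DECLARE_WAIT_QUEUE_HEAD(), or embedded
 * in a structure and set up with init_waitqueue_head(), which expands to
 * __init_waitqueue_head() with a per-callsite lockdep class key. The
 * structure and function names below are hypothetical:
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(setup_wq);
 *
 *      struct my_dev {                         // hypothetical structure
 *              struct wait_queue_head wq;
 *      };
 *
 *      static void my_dev_init(struct my_dev *dev)
 *      {
 *              init_waitqueue_head(&dev->wq);
 *      }
 */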

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue_entry_tail(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_head->lock, flags);
        __remove_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
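/*
 * Queue-ordering sketch (illustrative): the three add variants differ only
 * in flags and insertion point. Non-exclusive entries are added at the head,
 * exclusive entries at the tail, and priority entries (which are also
 * exclusive) are kept clustered at the very front by __add_wait_queue() in
 * <linux/wait.h>. A wakeup walk therefore sees priority waiters first, then
 * non-exclusive waiters, then exclusive waiters. The entries a, b, c are
 * hypothetical:
 *
 *      add_wait_queue(&wq, &a);                // non-exclusive, at head
 *      add_wait_queue_exclusive(&wq, &b);      // exclusive, at tail
 *      add_wait_queue_priority(&wq, &c);       // priority+exclusive, front
 *
 *      // walk order: c, a, b; wake_up(&wq) wakes c and stops there if c
 *      // consumes the event, otherwise it wakes a, then b.
 */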

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 * the list and any non-exclusive tasks will be woken first. A priority task
 * may be at the head of the list, and can consume the event without any other
 * tasks being woken.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key,
                        wait_queue_entry_t *bookmark)
{
        wait_queue_entry_t *curr, *next;
        int cnt = 0;

        lockdep_assert_held(&wq_head->lock);

        if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
                curr = list_next_entry(bookmark, entry);

                list_del(&bookmark->entry);
                bookmark->flags = 0;
        } else
                curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

        if (&curr->entry == &wq_head->head)
                return nr_exclusive;

        list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
                unsigned flags = curr->flags;
                int ret;

                if (flags & WQ_FLAG_BOOKMARK)
                        continue;

                ret = curr->func(curr, mode, wake_flags, key);
                if (ret < 0)
                        break;
                if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;

                if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
                                (&next->entry != &wq_head->head)) {
                        bookmark->flags = WQ_FLAG_BOOKMARK;
                        list_add_tail(&bookmark->entry, &next->entry);
                        break;
                }
        }

        return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        unsigned long flags;
        wait_queue_entry_t bookmark;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        do {
                spin_lock_irqsave(&wq_head->lock, flags);
                nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
                                                wake_flags, key, &bookmark);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        } while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
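/*
 * Callers rarely use __wake_up() directly; the wake_up*() macros in
 * <linux/wait.h> supply the usual mode/nr_exclusive combinations. A sketch
 * of the common ones (see the header for the authoritative definitions):
 *
 *      wake_up(&wq);                   // __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *      wake_up_all(&wq);               // __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *      wake_up_interruptible(&wq);     // __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 */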

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
        __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
        __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                        void *key)
{
        if (unlikely(!wq_head))
                return;

        __wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
                               unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
        __wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
        __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
        /* POLLFREE must have cleared the queue. */
        WARN_ON_ONCE(waitqueue_active(wq_head));
}
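
/*
 * Note (informational): callers are expected to go through the
 * wake_up_pollfree() wrapper in <linux/wait.h>, which only calls into this
 * function when the waitqueue is actually in use; see the comment there for
 * when POLLFREE is appropriate (a waitqueue about to be freed while pollers
 * may still be queued on it).
 */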

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see the waitqueue addition, _or_ our
 * subsequent tests in this thread will see the wakeup having
 * taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
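/*
 * Usage sketch (illustrative): the classic open-coded wait loop. DEFINE_WAIT()
 * pairs the entry with autoremove_wake_function() (defined below); "wq" and
 * "condition" are hypothetical and stand for whatever event the caller is
 * waiting for:
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *              if (condition)
 *                      break;
 *              if (signal_pending(current))
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&wq, &wait);
 */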

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        bool was_empty = false;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry)) {
                was_empty = list_empty(&wq_head->head);
                __add_wait_queue_entry_tail(wq_head, wq_entry);
        }
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
        return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
        wq_entry->flags = flags;
        wq_entry->private = current;
        wq_entry->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&wq_head->lock, flags);
        if (signal_pending_state(state, current)) {
                /*
                 * An exclusive waiter must not fail if it was selected by a
                 * wakeup; it should "consume" the condition we were waiting
                 * for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up; we cannot miss the event because
                 * wakeup locks/unlocks the same wq_head->lock.
                 *
                 * But we need to ensure that a set-condition + wakeup after
                 * that can't see us; it should wake up another exclusive
                 * waiter if we fail.
                 */
                list_del_init(&wq_entry->entry);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wq_entry->entry)) {
                        if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_entry_tail(wq_head, wq_entry);
                        else
                                __add_wait_queue(wq_head, wq_entry);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&wq_head->lock, flags);

        return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
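/*
 * Usage sketch (illustrative): prepare_to_wait_event() is the workhorse
 * behind the wait_event*() macros. The loop in ___wait_event() in
 * <linux/wait.h> is roughly the following ("wq" and "condition" are
 * hypothetical placeholders):
 *
 *      struct wait_queue_entry wait;
 *      long err;
 *
 *      init_wait_entry(&wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *      for (;;) {
 *              err = prepare_to_wait_event(&wq, &wait, state);
 *              if (condition)
 *                      break;
 *              if (err) {      // -ERESTARTSYS: a signal arrived
 *                      ret = err;
 *                      break;
 *              }
 *              schedule();
 *      }
 *      finish_wait(&wq, &wait);
 */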

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock(&wq->lock);
        schedule();
        spin_lock(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock_irq(&wq->lock);
        schedule();
        spin_lock_irq(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
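/*
 * Usage sketch (illustrative): these two helpers back the
 * wait_event_interruptible_locked*() family in <linux/wait.h>, where the
 * caller already holds wq->lock around the condition check. Roughly
 * ("wq" and "condition" are hypothetical placeholders):
 *
 *      int err;
 *      DEFINE_WAIT(wait);
 *
 *      spin_lock(&wq.lock);
 *      while (!condition) {
 *              err = do_wait_intr(&wq, &wait);
 *              if (err)
 *                      break;
 *      }
 *      __remove_wait_queue(&wq, &wait);
 *      __set_current_state(TASK_RUNNING);
 *      spin_unlock(&wq.lock);
 */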

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPUs that we haven't seen yet (and that might
         *    still change the stack area),
         * and
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wq_entry->entry)) {
                spin_lock_irqsave(&wq_head->lock, flags);
                list_del_init(&wq_entry->entry);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wq_entry, mode, sync, key);

        if (ret)
                list_del_init_careful(&wq_entry->entry);

        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
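/*
 * Note (informational): DEFINE_WAIT() in <linux/wait.h> is shorthand for
 * DEFINE_WAIT_FUNC(name, autoremove_wake_function), so waiters declared that
 * way are unlinked from the queue by the waker itself on a successful
 * wakeup, and finish_wait() then usually finds the entry already empty.
 */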

static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()                       // in woken_wake_function()
 *
 *     p->state = mode;                         wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A                           try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))     <full barrier>
 *         schedule()                              if (p->state & mode)
 *     p->state = TASK_RUNNING;                       p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;       ~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B                           condition = true;
 * }                                            smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);          wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
        /*
         * The below executes an smp_mb(), which matches with the full barrier
         * executed by the try_to_wake_up() in woken_wake_function() such that
         * either we see the store to wq_entry->flags in woken_wake_function()
         * or woken_wake_function() sees our store to current->state.
         */
        set_current_state(mode); /* A */
        if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below executes an smp_mb(), which matches with the smp_mb() (C)
         * in woken_wake_function() such that either we see the wait condition
         * being true or the store to wq_entry->flags in woken_wake_function()
         * follows ours in the coherence order.
         */
        smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        /* Pairs with the smp_store_mb() in wait_woken(). */
        smp_mb(); /* C */
        wq_entry->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);