/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE       0x01
#define WQ_FLAG_WOKEN           0x02
#define WQ_FLAG_BOOKMARK        0x04
#define WQ_FLAG_CUSTOM          0x08
#define WQ_FLAG_DONE            0x10
#define WQ_FLAG_PRIORITY        0x20

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
        unsigned int            flags;
        void                    *private;
        wait_queue_func_t       func;
        struct list_head        entry;
};

struct wait_queue_head {
        spinlock_t              lock;
        struct list_head        head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {                                    \
        .private        = tsk,                                                  \
        .func           = default_wake_function,                                \
        .entry          = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)                                            \
        struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                                   \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),                      \
        .head           = { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head)                                            \
        do {                                                                    \
                static struct lock_class_key __key;                             \
                                                                                \
                __init_waitqueue_head((wq_head), #wq_head, &__key);             \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
        wq_entry->flags         = 0;
        wq_entry->private       = p;
        wq_entry->func          = default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
        wq_entry->flags         = 0;
        wq_entry->private       = NULL;
        wq_entry->func          = func;
}

/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))         if (@cond)
 *        wake_up(wq_head);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
        return !list_empty(&wq_head->head);
}

/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
        return list_is_singular(&wq_head->head);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
        /*
         * We need to be sure we are in sync with the
         * add_wait_queue modifications to the wait queue.
         *
         * This memory barrier should be paired with one on the
         * waiting side.
         */
        smp_mb();
        return waitqueue_active(wq_head);
}
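
/*
 * Illustrative sketch (not part of this header): a typical waker can use
 * wq_has_sleeper() to skip the wake_up() call when nobody is waiting.  The
 * struct and field names below (my_dev, data_ready, wq) are hypothetical.
 *
 *      static void my_dev_push_data(struct my_dev *dev)
 *      {
 *              WRITE_ONCE(dev->data_ready, true);
 *              // wq_has_sleeper() supplies the smp_mb() needed to order the
 *              // data_ready store against the waiter-list load.
 *              if (wq_has_sleeper(&dev->wq))
 *                      wake_up(&dev->wq);
 *      }
 */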

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        struct list_head *head = &wq_head->head;
        struct wait_queue_entry *wq;

        list_for_each_entry(wq, &wq_head->head, entry) {
                if (!(wq->flags & WQ_FLAG_PRIORITY))
                        break;
                head = &wq->entry;
        }
        list_add(&wq_entry->entry, head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);

#define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)                                                      \
        __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)                                               \
        __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)                                        \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)                                   \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)                            \
        __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
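
/*
 * Illustrative sketch (not part of this header): a driver that implements
 * poll() typically reports readiness with one of the *_poll() wakeups so
 * that poll/epoll waiters see the event mask in the wakeup key.  The struct
 * and field names below (my_dev, read_wq, rx_ready) are hypothetical.
 *
 *      static void my_dev_rx_complete(struct my_dev *dev)
 *      {
 *              dev->rx_ready = true;
 *              // Waiters sleeping in poll() receive EPOLLIN | EPOLLRDNORM
 *              // via key_to_poll() in their wake function.
 *              wake_up_interruptible_poll(&dev->read_wq, EPOLLIN | EPOLLRDNORM);
 *      }
 */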

#define ___wait_cond_timeout(condition)                                         \
({                                                                              \
        bool __cond = (condition);                                              \
        if (__cond && !__ret)                                                   \
                __ret = 1;                                                      \
        __cond || !__ret;                                                       \
})

#define ___wait_is_interruptible(state)                                         \
        (!__builtin_constant_p(state) ||                                        \
                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)          \

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)           \
({                                                                              \
        __label__ __out;                                                        \
        struct wait_queue_entry __wq_entry;                                     \
        long __ret = ret;       /* explicit shadow */                           \
                                                                                \
        init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);        \
        for (;;) {                                                              \
                long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
                                                                                \
                if (condition)                                                  \
                        break;                                                  \
                                                                                \
                if (___wait_is_interruptible(state) && __int) {                 \
                        __ret = __int;                                          \
                        goto __out;                                             \
                }                                                               \
                                                                                \
                cmd;                                                            \
        }                                                                       \
        finish_wait(&wq_head, &__wq_entry);                                     \
__out:  __ret;                                                                  \
})

#define __wait_event(wq_head, condition)                                        \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
                            schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)                                          \
do {                                                                            \
        might_sleep();                                                          \
        if (condition)                                                          \
                break;                                                          \
        __wait_event(wq_head, condition);                                       \
} while (0)
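
/*
 * Illustrative sketch (not part of this header): the canonical pairing of
 * wait_event() with wake_up().  The names (dev, done, wq) are hypothetical;
 * a real user embeds a wait_queue_head_t in its own structure.
 *
 *      // waiter context (may sleep, TASK_UNINTERRUPTIBLE):
 *      wait_event(dev->wq, READ_ONCE(dev->done));
 *
 *      // waker context, after making the condition true:
 *      WRITE_ONCE(dev->done, true);
 *      wake_up(&dev->wq);
 */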

#define __io_wait_event(wq_head, condition)                                     \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
                            io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)                                       \
do {                                                                            \
        might_sleep();                                                          \
        if (condition)                                                          \
                break;                                                          \
        __io_wait_event(wq_head, condition);                                    \
} while (0)

#define __wait_event_freezable(wq_head, condition)                              \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,             \
                            freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)                                \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_freezable(wq_head, condition);             \
        __ret;                                                                  \
})

#define __wait_event_timeout(wq_head, condition, timeout)                       \
        ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
                      TASK_UNINTERRUPTIBLE, 0, timeout,                         \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)                         \
({                                                                              \
        long __ret = timeout;                                                   \
        might_sleep();                                                          \
        if (!___wait_cond_timeout(condition))                                   \
                __ret = __wait_event_timeout(wq_head, condition, timeout);      \
        __ret;                                                                  \
})
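
/*
 * Illustrative sketch (not part of this header): interpreting the return
 * value of wait_event_timeout().  The names (dev, irq_done, wq) are
 * hypothetical; the timeout is given in jiffies.
 *
 *      long left = wait_event_timeout(dev->wq, READ_ONCE(dev->irq_done),
 *                                     msecs_to_jiffies(100));
 *      if (!left)
 *              return -ETIMEDOUT;      // condition still false after 100ms
 *      // left >= 1: condition became true, 'left' jiffies were remaining
 */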

#define __wait_event_freezable_timeout(wq_head, condition, timeout)             \
        ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
                      TASK_INTERRUPTIBLE, 0, timeout,                           \
                      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)               \
({                                                                              \
        long __ret = timeout;                                                   \
        might_sleep();                                                          \
        if (!___wait_cond_timeout(condition))                                   \
                __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
        __ret;                                                                  \
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)              \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,     \
                            cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)                \
do {                                                                            \
        if (condition)                                                          \
                break;                                                          \
        __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);             \
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)                        \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
                            cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)                          \
do {                                                                            \
        if (condition)                                                          \
                break;                                                          \
        __wait_event_cmd(wq_head, condition, cmd1, cmd2);                       \
} while (0)

#define __wait_event_interruptible(wq_head, condition)                          \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,             \
                      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)                            \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_interruptible(wq_head, condition);         \
        __ret;                                                                  \
})
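
/*
 * Illustrative sketch (not part of this header): handling the -ERESTARTSYS
 * return of wait_event_interruptible() in a syscall or ioctl path.  The
 * names (dev, event_pending, wq) are hypothetical.
 *
 *      int ret = wait_event_interruptible(dev->wq,
 *                                         READ_ONCE(dev->event_pending));
 *      if (ret)
 *              return ret;     // -ERESTARTSYS: a signal interrupted the wait
 *      // ret == 0: the condition is true, proceed
 */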

#define __wait_event_interruptible_timeout(wq_head, condition, timeout)         \
        ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
                      TASK_INTERRUPTIBLE, 0, timeout,                           \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)           \
({                                                                              \
        long __ret = timeout;                                                   \
        might_sleep();                                                          \
        if (!___wait_cond_timeout(condition))                                   \
                __ret = __wait_event_interruptible_timeout(wq_head,             \
                                                condition, timeout);            \
        __ret;                                                                  \
})

#define __wait_event_hrtimeout(wq_head, condition, timeout, state)              \
({                                                                              \
        int __ret = 0;                                                          \
        struct hrtimer_sleeper __t;                                             \
                                                                                \
        hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,                    \
                                      HRTIMER_MODE_REL);                        \
        if ((timeout) != KTIME_MAX)                                             \
                hrtimer_start_range_ns(&__t.timer, timeout,                     \
                                       current->timer_slack_ns,                 \
                                       HRTIMER_MODE_REL);                       \
                                                                                \
        __ret = ___wait_event(wq_head, condition, state, 0, 0,                  \
                if (!__t.task) {                                                \
                        __ret = -ETIME;                                         \
                        break;                                                  \
                }                                                               \
                schedule());                                                    \
                                                                                \
        hrtimer_cancel(&__t.timer);                                             \
        destroy_hrtimer_on_stack(&__t.timer);                                   \
        __ret;                                                                  \
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)                       \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_hrtimeout(wq_head, condition, timeout,     \
                                               TASK_UNINTERRUPTIBLE);           \
        __ret;                                                                  \
})
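
/*
 * Illustrative sketch (not part of this header): wait_event_hrtimeout()
 * takes a ktime_t, so relative timeouts are usually built with helpers such
 * as ms_to_ktime().  The names (dev, fifo_empty, wq) are hypothetical.
 *
 *      int ret = wait_event_hrtimeout(dev->wq, READ_ONCE(dev->fifo_empty),
 *                                     ms_to_ktime(5));
 *      if (ret == -ETIME)
 *              return ret;     // the 5ms high-resolution timeout expired
 *      // ret == 0: the condition became true in time
 */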

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)              \
({                                                                              \
        long __ret = 0;                                                         \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_hrtimeout(wq, condition, timeout,          \
                                               TASK_INTERRUPTIBLE);             \
        __ret;                                                                  \
})

#define __wait_event_interruptible_exclusive(wq, condition)                     \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,                  \
                      schedule())

#define wait_event_interruptible_exclusive(wq, condition)                       \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_interruptible_exclusive(wq, condition);    \
        __ret;                                                                  \
})

#define __wait_event_killable_exclusive(wq, condition)                          \
        ___wait_event(wq, condition, TASK_KILLABLE, 1, 0,                       \
                      schedule())

#define wait_event_killable_exclusive(wq, condition)                            \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_killable_exclusive(wq, condition);         \
        __ret;                                                                  \
})


#define __wait_event_freezable_exclusive(wq, condition)                         \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,                  \
                        freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition)                           \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_freezable_exclusive(wq, condition);        \
        __ret;                                                                  \
})

/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)                                     \
do {                                                                            \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
} while (0)
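
/*
 * Illustrative sketch (not part of this header): a housekeeping kthread can
 * sleep in TASK_IDLE so that its wait does not inflate the load average.
 * The names (td, work_pending, wq, do_housekeeping) are hypothetical.
 *
 *      while (!kthread_should_stop()) {
 *              wait_event_idle(td->wq, READ_ONCE(td->work_pending) ||
 *                                      kthread_should_stop());
 *              do_housekeeping(td);
 *      }
 */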

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wakeup that wakes
 * this process will not consider any further waiters.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)                           \
do {                                                                            \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout)                  \
        ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
                      TASK_IDLE, 0, timeout,                                    \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)                    \
({                                                                              \
        long __ret = timeout;                                                   \
        might_sleep();                                                          \
        if (!___wait_cond_timeout(condition))                                   \
                __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
        __ret;                                                                  \
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)        \
        ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
                      TASK_IDLE, 1, timeout,                                    \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wakeup that wakes
 * this process will not consider any further waiters.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)          \
({                                                                              \
        long __ret = timeout;                                                   \
        might_sleep();                                                          \
        if (!___wait_cond_timeout(condition))                                   \
                __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
        __ret;                                                                  \
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)         \
({                                                                              \
        int __ret;                                                              \
        DEFINE_WAIT(__wait);                                                    \
        if (exclusive)                                                          \
                __wait.flags |= WQ_FLAG_EXCLUSIVE;                              \
        do {                                                                    \
                __ret = fn(&(wq), &__wait);                                     \
                if (__ret)                                                      \
                        break;                                                  \
        } while (!(condition));                                                 \
        __remove_wait_queue(&(wq), &__wait);                                    \
        __set_current_state(TASK_RUNNING);                                      \
        __ret;                                                                  \
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)                          \
        ((condition)                                                            \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
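
/*
 * Illustrative sketch (not part of this header): the *_locked variants are
 * called with wq.lock already held, and the matching waker uses
 * wake_up_locked() under the same lock.  The names (dev, ready, wq) are
 * hypothetical.
 *
 *      // waiter:
 *      spin_lock(&dev->wq.lock);
 *      ret = wait_event_interruptible_locked(dev->wq, dev->ready);
 *      spin_unlock(&dev->wq.lock);
 *
 *      // waker:
 *      spin_lock(&dev->wq.lock);
 *      dev->ready = true;
 *      wake_up_locked(&dev->wq);
 *      spin_unlock(&dev->wq.lock);
 */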

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)                      \
        ((condition)                                                            \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wakeup that wakes
 * this process will not consider any further waiters.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)                \
        ((condition)                                                            \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wakeup that wakes
 * this process will not consider any further waiters.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)            \
        ((condition)                                                            \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))


#define __wait_event_killable(wq, condition)                                    \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)                                 \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_killable(wq_head, condition);              \
        __ret;                                                                  \
})

#define __wait_event_killable_timeout(wq_head, condition, timeout)              \
        ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
                      TASK_KILLABLE, 0, timeout,                                \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)                \
({                                                                              \
        long __ret = timeout;                                                   \
        might_sleep();                                                          \
        if (!___wait_cond_timeout(condition))                                   \
                __ret = __wait_event_killable_timeout(wq_head,                  \
                                                condition, timeout);            \
        __ret;                                                                  \
})


#define __wait_event_lock_irq(wq_head, condition, lock, cmd)                    \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
                            spin_unlock_irq(&lock);                             \
                            cmd;                                                \
                            schedule();                                         \
                            spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)                  \
do {                                                                            \
        if (condition)                                                          \
                break;                                                          \
        __wait_event_lock_irq(wq_head, condition, lock, cmd);                   \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock)                           \
do {                                                                            \
        if (condition)                                                          \
                break;                                                          \
        __wait_event_lock_irq(wq_head, condition, lock, );                      \
} while (0)
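
/*
 * Illustrative sketch (not part of this header): wait_event_lock_irq()
 * checks the condition under a spin_lock_irq()-protected lock and drops it
 * while sleeping.  The names (q, list, lock, wq, struct my_item, node) are
 * hypothetical.
 *
 *      spin_lock_irq(&q->lock);
 *      // The lock is released around schedule() and re-taken before the
 *      // condition is re-evaluated, so list_empty() is always checked
 *      // with q->lock held.
 *      wait_event_lock_irq(q->wq, !list_empty(&q->list), q->lock);
 *      item = list_first_entry(&q->list, struct my_item, node);
 *      spin_unlock_irq(&q->lock);
 */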


#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)      \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,             \
                      spin_unlock_irq(&lock);                                   \
                      cmd;                                                      \
                      schedule();                                               \
                      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)    \
({                                                                              \
        int __ret = 0;                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_interruptible_lock_irq(wq_head,            \
                                                condition, lock, cmd);          \
        __ret;                                                                  \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)             \
({                                                                              \
        int __ret = 0;                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_interruptible_lock_irq(wq_head,            \
                                                condition, lock,);              \
        __ret;                                                                  \
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
                      state, 0, timeout,                                        \
                      spin_unlock_irq(&lock);                                   \
                      __ret = schedule_timeout(__ret);                          \
                      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,     \
                                                  timeout)                      \
({                                                                              \
        long __ret = timeout;                                                   \
        if (!___wait_cond_timeout(condition))                                   \
                __ret = __wait_event_lock_irq_timeout(                          \
                                        wq_head, condition, lock, timeout,      \
                                        TASK_INTERRUPTIBLE);                    \
        __ret;                                                                  \
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)          \
({                                                                              \
        long __ret = timeout;                                                   \
        if (!___wait_cond_timeout(condition))                                   \
                __ret = __wait_event_lock_irq_timeout(                          \
                                        wq_head, condition, lock, timeout,      \
                                        TASK_UNINTERRUPTIBLE);                  \
        __ret;                                                                  \
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)                                        \
        struct wait_queue_entry name = {                                        \
                .private        = current,                                      \
                .func           = function,                                     \
                .entry          = LIST_HEAD_INIT((name).entry),                 \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
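
/*
 * Illustrative sketch (not part of this header): the open-coded wait loop
 * built from DEFINE_WAIT()/prepare_to_wait()/finish_wait(), for callers
 * that need to re-check a more involved condition themselves.  The names
 * (dev, buf_ready, wq) are hypothetical.
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
 *              if (READ_ONCE(dev->buf_ready))
 *                      break;
 *              if (signal_pending(current))
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&dev->wq, &wait);
 */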

#define init_wait(wait)                                                         \
        do {                                                                    \
                (wait)->private = current;                                      \
                (wait)->func = autoremove_wake_function;                        \
                INIT_LIST_HEAD(&(wait)->entry);                                 \
                (wait)->flags = 0;                                              \
        } while (0)
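
/*
 * Illustrative sketch (not part of this header): the wait_woken() pattern
 * pairs DEFINE_WAIT_FUNC(..., woken_wake_function) with wait_woken(), which
 * uses WQ_FLAG_WOKEN to close the race between testing the condition and
 * sleeping.  The names (sk, data_ready, wq) are hypothetical.
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *      long timeo = msecs_to_jiffies(1000);
 *
 *      add_wait_queue(&sk->wq, &wait);
 *      while (!READ_ONCE(sk->data_ready) && timeo)
 *              timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
 *      remove_wait_queue(&sk->wq, &wait);
 */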

bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);

#endif /* _LINUX_WAIT_H */