1  /* SPDX-License-Identifier: GPL-2.0 */
2  #ifndef _LINUX_WAIT_H
3  #define _LINUX_WAIT_H
4  /*
5   * Linux wait queue related types and methods
6   */
7  #include <linux/list.h>
8  #include <linux/stddef.h>
9  #include <linux/spinlock.h>
10  
11  #include <asm/current.h>
12  
13  typedef struct wait_queue_entry wait_queue_entry_t;
14  
15  typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
16  int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17  
18  /* wait_queue_entry::flags */
19  #define WQ_FLAG_EXCLUSIVE	0x01
20  #define WQ_FLAG_WOKEN		0x02
21  #define WQ_FLAG_CUSTOM		0x04
22  #define WQ_FLAG_DONE		0x08
23  #define WQ_FLAG_PRIORITY	0x10
24  
25  /*
26   * A single wait-queue entry structure:
27   */
28  struct wait_queue_entry {
29  	unsigned int		flags;
30  	void			*private;
31  	wait_queue_func_t	func;
32  	struct list_head	entry;
33  };
34  
35  struct wait_queue_head {
36  	spinlock_t		lock;
37  	struct list_head	head;
38  };
39  typedef struct wait_queue_head wait_queue_head_t;
40  
41  struct task_struct;
42  
43  /*
 * Macros for declaration and initialisation of the datatypes
45   */
46  
/*
 * Statically initialize a wait_queue_entry that wakes task @tsk via
 * default_wake_function(); the list linkage is filled in when the
 * entry is actually added to a queue.
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait_queue_head: unlocked lock, empty list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= LIST_HEAD_INIT(name.head) }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initialization; the static lock_class_key gives every
 * init_waitqueue_head() call site its own lockdep class.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
/* On-stack heads need runtime init so lockdep gets a valid key. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
79  
init_waitqueue_entry(struct wait_queue_entry * wq_entry,struct task_struct * p)80  static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
81  {
82  	wq_entry->flags		= 0;
83  	wq_entry->private	= p;
84  	wq_entry->func		= default_wake_function;
85  }
86  
87  static inline void
init_waitqueue_func_entry(struct wait_queue_entry * wq_entry,wait_queue_func_t func)88  init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
89  {
90  	wq_entry->flags		= 0;
91  	wq_entry->private	= NULL;
92  	wq_entry->func		= func;
93  }
94  
95  /**
96   * waitqueue_active -- locklessly test for waiters on the queue
97   * @wq_head: the waitqueue to test for waiters
98   *
99   * returns true if the wait list is not empty
100   *
101   * NOTE: this function is lockless and requires care, incorrect usage _will_
102   * lead to sporadic and non-obvious failure.
103   *
104   * Use either while holding wait_queue_head::lock or when used for wakeups
105   * with an extra smp_mb() like::
106   *
107   *      CPU0 - waker                    CPU1 - waiter
108   *
109   *                                      for (;;) {
110   *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
111   *      smp_mb();                         // smp_mb() from set_current_state()
112   *      if (waitqueue_active(wq_head))         if (@cond)
113   *        wake_up(wq_head);                      break;
114   *                                        schedule();
115   *                                      }
116   *                                      finish_wait(&wq_head, &wait);
117   *
118   * Because without the explicit smp_mb() it's possible for the
119   * waitqueue_active() load to get hoisted over the @cond store such that we'll
120   * observe an empty wait list while the waiter might not observe @cond.
121   *
122   * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
123   * which (when the lock is uncontended) are of roughly equal cost.
124   */
waitqueue_active(struct wait_queue_head * wq_head)125  static inline int waitqueue_active(struct wait_queue_head *wq_head)
126  {
127  	return !list_empty(&wq_head->head);
128  }
129  
130  /**
131   * wq_has_single_sleeper - check if there is only one sleeper
132   * @wq_head: wait queue head
133   *
 * Returns true if wq_head has only one sleeper on the list.
135   *
136   * Please refer to the comment for waitqueue_active.
137   */
wq_has_single_sleeper(struct wait_queue_head * wq_head)138  static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
139  {
140  	return list_is_singular(&wq_head->head);
141  }
142  
143  /**
144   * wq_has_sleeper - check if there are any waiting processes
145   * @wq_head: wait queue head
146   *
147   * Returns true if wq_head has waiting processes
148   *
149   * Please refer to the comment for waitqueue_active.
150   */
wq_has_sleeper(struct wait_queue_head * wq_head)151  static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
152  {
153  	/*
154  	 * We need to be sure we are in sync with the
155  	 * add_wait_queue modifications to the wait queue.
156  	 *
157  	 * This memory barrier should be paired with one on the
158  	 * waiting side.
159  	 */
160  	smp_mb();
161  	return waitqueue_active(wq_head);
162  }
163  
164  extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
165  extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
166  extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
167  extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
168  
__add_wait_queue(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)169  static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
170  {
171  	struct list_head *head = &wq_head->head;
172  	struct wait_queue_entry *wq;
173  
174  	list_for_each_entry(wq, &wq_head->head, entry) {
175  		if (!(wq->flags & WQ_FLAG_PRIORITY))
176  			break;
177  		head = &wq->entry;
178  	}
179  	list_add(&wq_entry->entry, head);
180  }
181  
182  /*
183   * Used for wake-one threads:
184   */
185  static inline void
__add_wait_queue_exclusive(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)186  __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
187  {
188  	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
189  	__add_wait_queue(wq_head, wq_entry);
190  }
191  
__add_wait_queue_entry_tail(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)192  static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
193  {
194  	list_add_tail(&wq_entry->entry, &wq_head->head);
195  }
196  
197  static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)198  __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
199  {
200  	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
201  	__add_wait_queue_entry_tail(wq_head, wq_entry);
202  }
203  
204  static inline void
__remove_wait_queue(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)205  __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
206  {
207  	list_del(&wq_entry->entry);
208  }
209  
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

/*
 * Wakeup helpers: the numeric argument is how many exclusive waiters to
 * wake — 1 for the plain variants, 0 (== all) for the _all variants.
 * The _locked variants are for callers already holding wq_head->lock.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)

/*
 * Wakeup macros to be used to report events to the targets.
 * The poll mask is smuggled through the wakeup key as a cast pointer.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_poll_on_current_cpu(x, m)					\
	__wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
247  
248  /**
249   * wake_up_pollfree - signal that a polled waitqueue is going away
250   * @wq_head: the wait queue head
251   *
252   * In the very rare cases where a ->poll() implementation uses a waitqueue whose
253   * lifetime is tied to a task rather than to the 'struct file' being polled,
254   * this function must be called before the waitqueue is freed so that
255   * non-blocking polls (e.g. epoll) are notified that the queue is going away.
256   *
257   * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
258   * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
259   */
wake_up_pollfree(struct wait_queue_head * wq_head)260  static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
261  {
262  	/*
263  	 * For performance reasons, we don't always take the queue lock here.
264  	 * Therefore, we might race with someone removing the last entry from
265  	 * the queue, and proceed while they still hold the queue lock.
266  	 * However, rcu_read_lock() is required to be held in such cases, so we
267  	 * can safely proceed with an RCU-delayed free.
268  	 */
269  	if (waitqueue_active(wq_head))
270  		__wake_up_pollfree(wq_head);
271  }
272  
/*
 * Re-evaluate @condition and fold in the timeout state: __ret (the
 * remaining jiffies, shadowed by the enclosing wait_event_*timeout()
 * macro) is forced to 1 when the condition turned true exactly as the
 * timeout expired, so success is still distinguishable from timeout.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

/* A non-compile-time-constant @state must be assumed interruptible. */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
	 (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		/* abort the wait and report the pending-signal error */	\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
322  
/* Uninterruptible, non-exclusive wait; no return value. */
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)

/* Same as __wait_event() but sleeps via io_schedule(). */
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
361  
/* Interruptible wait that the freezer may also freeze. */
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE),	\
			0, 0, schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})

/* Uninterruptible wait; the remaining jiffies are carried in __ret. */
#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})
419  
/* Freezable variant of __wait_event_timeout(). */
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout,		\
		      __ret = schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})

/* Exclusive wait running @cmd1 before and @cmd2 after each schedule(). */
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)
475  
/* Non-exclusive interruptible wait; returns 0 or a signal error. */
#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})

/* Interruptible wait; the remaining jiffies are carried in __ret. */
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
538  
/*
 * Sleep in @state until @condition or the on-stack hrtimer fires.
 * __t.task == NULL is the hrtimer_sleeper's "timer expired" indication
 * and is translated into -ETIME.  With @timeout == KTIME_MAX no timer
 * is armed at all and the wait is unbounded.
 */
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX) {						\
		hrtimer_set_expires_range_ns(&__t.timer, timeout,		\
					current->timer_slack_ns);		\
		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);		\
	}									\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
589  
590  /**
591   * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
592   * @wq: the waitqueue to wait on
593   * @condition: a C expression for the event to wait for
594   * @timeout: timeout, as a ktime_t
595   *
596   * The process is put to sleep (TASK_INTERRUPTIBLE) until the
597   * @condition evaluates to true or a signal is received.
598   * The @condition is checked each time the waitqueue @wq is woken up.
599   *
600   * wake_up() has to be called after changing any variable that could
601   * change the result of the wait condition.
602   *
603   * The function returns 0 if @condition became true, -ERESTARTSYS if it was
604   * interrupted by a signal, or -ETIME if the timeout elapsed.
605   */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})

/* Exclusive (wake-one) interruptible wait; returns 0 or -ERESTARTSYS. */
#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

/* Exclusive wait interruptible only by fatal signals (TASK_KILLABLE). */
#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})


/* Exclusive, interruptible and freezable wait. */
#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\
			schedule())

#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})
655  
656  /**
657   * wait_event_idle - wait for a condition without contributing to system load
658   * @wq_head: the waitqueue to wait on
659   * @condition: a C expression for the event to wait for
660   *
661   * The process is put to sleep (TASK_IDLE) until the
662   * @condition evaluates to true.
663   * The @condition is checked each time the waitqueue @wq_head is woken up.
664   *
665   * wake_up() has to be called after changing any variable that could
666   * change the result of the wait condition.
667   *
668   */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)
699  
/* TASK_IDLE wait; the remaining jiffies are carried in __ret. */
#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

/* As above, but the waiter is queued exclusively (wake-one). */
#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;									\
})
769  
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

/*
 * Wait with wq.lock held on entry and exit: @fn (do_wait_intr or
 * do_wait_intr_irq) performs the actual sleep — per the kerneldoc of
 * the callers below, the lock is dropped while sleeping — and returns
 * non-zero to abort the wait (e.g. on a pending signal).
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})
788  
789  
790  /**
791   * wait_event_interruptible_locked - sleep until a condition gets true
792   * @wq: the waitqueue to wait on
793   * @condition: a C expression for the event to wait for
794   *
795   * The process is put to sleep (TASK_INTERRUPTIBLE) until the
796   * @condition evaluates to true or a signal is received.
797   * The @condition is checked each time the waitqueue @wq is woken up.
798   *
799   * It must be called with wq.lock being held.  This spinlock is
800   * unlocked while sleeping but @condition testing is done while lock
801   * is held and when this macro exits the lock is held.
802   *
803   * The lock is locked/unlocked using spin_lock()/spin_unlock()
804   * functions which must match the way they are locked/unlocked outside
805   * of this macro.
806   *
807   * wake_up_locked() has to be called after changing any variable that could
808   * change the result of the wait condition.
809   *
810   * The function will return -ERESTARTSYS if it was interrupted by a
811   * signal and 0 if @condition evaluated to true.
812   */
#define wait_event_interruptible_locked(wq, condition)				\
	/* non-exclusive wait; wq.lock cycled via spin_lock()/spin_unlock() */	\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
816  
817  /**
818   * wait_event_interruptible_locked_irq - sleep until a condition gets true
819   * @wq: the waitqueue to wait on
820   * @condition: a C expression for the event to wait for
821   *
822   * The process is put to sleep (TASK_INTERRUPTIBLE) until the
823   * @condition evaluates to true or a signal is received.
824   * The @condition is checked each time the waitqueue @wq is woken up.
825   *
826   * It must be called with wq.lock being held.  This spinlock is
827   * unlocked while sleeping but @condition testing is done while lock
828   * is held and when this macro exits the lock is held.
829   *
830   * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
831   * functions which must match the way they are locked/unlocked outside
832   * of this macro.
833   *
834   * wake_up_locked() has to be called after changing any variable that could
835   * change the result of the wait condition.
836   *
837   * The function will return -ERESTARTSYS if it was interrupted by a
838   * signal and 0 if @condition evaluated to true.
839   */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	/* non-exclusive wait; wq.lock cycled via spin_lock_irq()/spin_unlock_irq() */\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
843  
844  /**
845   * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
846   * @wq: the waitqueue to wait on
847   * @condition: a C expression for the event to wait for
848   *
849   * The process is put to sleep (TASK_INTERRUPTIBLE) until the
850   * @condition evaluates to true or a signal is received.
851   * The @condition is checked each time the waitqueue @wq is woken up.
852   *
853   * It must be called with wq.lock being held.  This spinlock is
854   * unlocked while sleeping but @condition testing is done while lock
855   * is held and when this macro exits the lock is held.
856   *
857   * The lock is locked/unlocked using spin_lock()/spin_unlock()
858   * functions which must match the way they are locked/unlocked outside
859   * of this macro.
860   *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this
 * process is woken up further processes are not considered.
864   *
865   * wake_up_locked() has to be called after changing any variable that could
866   * change the result of the wait condition.
867   *
868   * The function will return -ERESTARTSYS if it was interrupted by a
869   * signal and 0 if @condition evaluated to true.
870   */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	/* WQ_FLAG_EXCLUSIVE wait; wq.lock cycled via spin_lock()/spin_unlock() */\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
874  
875  /**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
877   * @wq: the waitqueue to wait on
878   * @condition: a C expression for the event to wait for
879   *
880   * The process is put to sleep (TASK_INTERRUPTIBLE) until the
881   * @condition evaluates to true or a signal is received.
882   * The @condition is checked each time the waitqueue @wq is woken up.
883   *
884   * It must be called with wq.lock being held.  This spinlock is
885   * unlocked while sleeping but @condition testing is done while lock
886   * is held and when this macro exits the lock is held.
887   *
888   * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
889   * functions which must match the way they are locked/unlocked outside
890   * of this macro.
891   *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this
 * process is woken up further processes are not considered.
895   *
896   * wake_up_locked() has to be called after changing any variable that could
897   * change the result of the wait condition.
898   *
899   * The function will return -ERESTARTSYS if it was interrupted by a
900   * signal and 0 if @condition evaluated to true.
901   */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	/* WQ_FLAG_EXCLUSIVE wait; wq.lock cycled via spin_lock_irq()/spin_unlock_irq() */\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
905  
906  
/* Sleep in TASK_KILLABLE: only fatal (kill) signals can end the wait early. */
#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
909  
910  /**
911   * wait_event_killable - sleep until a condition gets true
912   * @wq_head: the waitqueue to wait on
913   * @condition: a C expression for the event to wait for
914   *
915   * The process is put to sleep (TASK_KILLABLE) until the
916   * @condition evaluates to true or a signal is received.
917   * The @condition is checked each time the waitqueue @wq_head is woken up.
918   *
919   * wake_up() has to be called after changing any variable that could
920   * change the result of the wait condition.
921   *
922   * The function will return -ERESTARTSYS if it was interrupted by a
923   * signal and 0 if @condition evaluated to true.
924   */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	/* Fast path: only enter the wait loop if not already true. */		\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})
933  
/* Sleep in the caller-supplied task @state until @condition is true. */
#define __wait_event_state(wq, condition, state)				\
	___wait_event(wq, condition, state, 0, 0, schedule())
936  
937  /**
938   * wait_event_state - sleep until a condition gets true
939   * @wq_head: the waitqueue to wait on
940   * @condition: a C expression for the event to wait for
941   * @state: state to sleep in
942   *
943   * The process is put to sleep (@state) until the @condition evaluates to true
944   * or a signal is received (when allowed by @state).  The @condition is checked
945   * each time the waitqueue @wq_head is woken up.
946   *
947   * wake_up() has to be called after changing any variable that could
948   * change the result of the wait condition.
949   *
950   * The function will return -ERESTARTSYS if it was interrupted by a signal
951   * (when allowed by @state) and 0 if @condition evaluated to true.
952   */
#define wait_event_state(wq_head, condition, state)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	/* Fast path: only enter the wait loop if not already true. */		\
	if (!(condition))							\
		__ret = __wait_event_state(wq_head, condition, state);		\
	__ret;									\
})
961  
/*
 * Timeout variant of __wait_event_killable(): the __ret updated here lives
 * inside ___wait_event()'s expansion and carries the remaining jiffies
 * across each schedule_timeout() round.
 */
#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))
966  
967  /**
968   * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
969   * @wq_head: the waitqueue to wait on
970   * @condition: a C expression for the event to wait for
971   * @timeout: timeout, in jiffies
972   *
973   * The process is put to sleep (TASK_KILLABLE) until the
974   * @condition evaluates to true or a kill signal is received.
975   * The @condition is checked each time the waitqueue @wq_head is woken up.
976   *
977   * wake_up() has to be called after changing any variable that could
978   * change the result of the wait condition.
979   *
980   * Returns:
981   * 0 if the @condition evaluated to %false after the @timeout elapsed,
982   * 1 if the @condition evaluated to %true after the @timeout elapsed,
983   * the remaining jiffies (at least 1) if the @condition evaluated
984   * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
985   * interrupted by a kill signal.
986   *
987   * Only kill signals interrupt this process.
988   */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	/* Fast path: skip the waitqueue entirely if already satisfied. */	\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						condition, timeout);		\
	__ret;									\
})
998  
999  
/*
 * Sleep in TASK_UNINTERRUPTIBLE with @lock released (spin_unlock_irq)
 * around @cmd and schedule(), retaking it before @condition is rechecked.
 * The (void) cast discards ___wait_event()'s statement-expression value.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))
1006  
1007  /**
1008   * wait_event_lock_irq_cmd - sleep until a condition gets true. The
1009   *			     condition is checked under the lock. This
1010   *			     is expected to be called with the lock
1011   *			     taken.
1012   * @wq_head: the waitqueue to wait on
1013   * @condition: a C expression for the event to wait for
1014   * @lock: a locked spinlock_t, which will be released before cmd
1015   *	  and schedule() and reacquired afterwards.
1016   * @cmd: a command which is invoked outside the critical section before
1017   *	 sleep
1018   *
1019   * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
1020   * @condition evaluates to true. The @condition is checked each time
1021   * the waitqueue @wq_head is woken up.
1022   *
1023   * wake_up() has to be called after changing any variable that could
1024   * change the result of the wait condition.
1025   *
1026   * This is supposed to be called while holding the lock. The lock is
1027   * dropped before invoking the cmd and going to sleep and is reacquired
1028   * afterwards.
1029   */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
do {										\
	/* Fast path: condition already true, lock is never dropped. */		\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
} while (0)
1036  
1037  /**
1038   * wait_event_lock_irq - sleep until a condition gets true. The
1039   *			 condition is checked under the lock. This
1040   *			 is expected to be called with the lock
1041   *			 taken.
1042   * @wq_head: the waitqueue to wait on
1043   * @condition: a C expression for the event to wait for
1044   * @lock: a locked spinlock_t, which will be released before schedule()
1045   *	  and reacquired afterwards.
1046   *
1047   * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
1048   * @condition evaluates to true. The @condition is checked each time
1049   * the waitqueue @wq_head is woken up.
1050   *
1051   * wake_up() has to be called after changing any variable that could
1052   * change the result of the wait condition.
1053   *
1054   * This is supposed to be called while holding the lock. The lock is
1055   * dropped before going to sleep and is reacquired afterwards.
1056   */
#define wait_event_lock_irq(wq_head, condition, lock)				\
do {										\
	if (condition)								\
		break;								\
	/* empty trailing argument: no cmd to run before sleeping */		\
	__wait_event_lock_irq(wq_head, condition, lock, );			\
} while (0)
1063  
1064  
/*
 * As __wait_event_lock_irq(), but sleeps in TASK_INTERRUPTIBLE so the
 * wait can also be ended by a signal (callers return -ERESTARTSYS then).
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))
1071  
1072  /**
1073   * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
1074   *		The condition is checked under the lock. This is expected to
1075   *		be called with the lock taken.
1076   * @wq_head: the waitqueue to wait on
1077   * @condition: a C expression for the event to wait for
1078   * @lock: a locked spinlock_t, which will be released before cmd and
1079   *	  schedule() and reacquired afterwards.
1080   * @cmd: a command which is invoked outside the critical section before
1081   *	 sleep
1082   *
1083   * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1084   * @condition evaluates to true or a signal is received. The @condition is
1085   * checked each time the waitqueue @wq_head is woken up.
1086   *
1087   * wake_up() has to be called after changing any variable that could
1088   * change the result of the wait condition.
1089   *
1090   * This is supposed to be called while holding the lock. The lock is
1091   * dropped before invoking the cmd and going to sleep and is reacquired
1092   * afterwards.
1093   *
1094   * The macro will return -ERESTARTSYS if it was interrupted by a signal
1095   * and 0 if @condition evaluated to true.
1096   */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
({										\
	int __ret = 0;								\
	/* Fast path: condition already true, lock is never dropped. */		\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock, cmd);		\
	__ret;									\
})
1105  
1106  /**
1107   * wait_event_interruptible_lock_irq - sleep until a condition gets true.
1108   *		The condition is checked under the lock. This is expected
1109   *		to be called with the lock taken.
1110   * @wq_head: the waitqueue to wait on
1111   * @condition: a C expression for the event to wait for
1112   * @lock: a locked spinlock_t, which will be released before schedule()
1113   *	  and reacquired afterwards.
1114   *
1115   * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1116   * @condition evaluates to true or signal is received. The @condition is
1117   * checked each time the waitqueue @wq_head is woken up.
1118   *
1119   * wake_up() has to be called after changing any variable that could
1120   * change the result of the wait condition.
1121   *
1122   * This is supposed to be called while holding the lock. The lock is
1123   * dropped before going to sleep and is reacquired afterwards.
1124   *
1125   * The macro will return -ERESTARTSYS if it was interrupted by a signal
1126   * and 0 if @condition evaluated to true.
1127   */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		/* trailing comma: the cmd argument is intentionally empty */	\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock,);		\
	__ret;									\
})
1136  
/*
 * Sleep in @state with @lock (an IRQ-safe spinlock) dropped around each
 * schedule_timeout() round; the __ret updated here lives inside
 * ___wait_event()'s expansion and carries the remaining jiffies.
 *
 * No trailing semicolon: callers terminate the statement themselves,
 * matching every other __wait_event_*() helper in this file (the stray
 * ';' previously made each expansion emit an extra null statement).
 */
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      state, 0, timeout,					\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock))
1143  
1144  /**
1145   * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
1146   *		true or a timeout elapses. The condition is checked under
1147   *		the lock. This is expected to be called with the lock taken.
1148   * @wq_head: the waitqueue to wait on
1149   * @condition: a C expression for the event to wait for
1150   * @lock: a locked spinlock_t, which will be released before schedule()
1151   *	  and reacquired afterwards.
1152   * @timeout: timeout, in jiffies
1153   *
1154   * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1155   * @condition evaluates to true or signal is received. The @condition is
1156   * checked each time the waitqueue @wq_head is woken up.
1157   *
1158   * wake_up() has to be called after changing any variable that could
1159   * change the result of the wait condition.
1160   *
1161   * This is supposed to be called while holding the lock. The lock is
1162   * dropped before going to sleep and is reacquired afterwards.
1163   *
1164   * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
1165   * was interrupted by a signal, and the remaining jiffies otherwise
1166   * if the condition evaluated to true before the timeout elapsed.
1167   */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	/* NOTE(review): no might_sleep() here, unlike the unlocked timeout	\
	 * macros -- presumably because @lock is held on entry; confirm. */	\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_INTERRUPTIBLE);			\
	__ret;									\
})
1178  
/**
 * wait_event_lock_irq_timeout - sleep until a condition gets true or a
 *		timeout elapses. The condition is checked under the lock.
 *		This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})
1188  
1189  /*
1190   * Waitqueues which are removed from the waitqueue_head at wakeup time
1191   */
/* Queue @wq_entry on @wq_head and set the task state for one wait round. */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* As prepare_to_wait() with a WQ_FLAG_EXCLUSIVE entry -- see definition for the meaning of the bool result. */
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* NOTE(review): presumably returns a negative errno when the wait must abort (e.g. signal); confirm in kernel/sched/wait.c. */
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* End a wait round: restore the task state and dequeue @wq_entry. */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
/* Sleep up to @timeout jiffies; pairs with a WQ_FLAG_WOKEN-setting waker (woken_wake_function). */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
/* wait_queue_func_t-compatible wake callbacks (signatures match the typedef above). */
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1199  
/*
 * Define an on-stack wait entry owned by current, woken via @function.
 * .entry starts self-linked (LIST_HEAD_INIT); .flags is zeroed by the
 * designated initializer.
 */
#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}

/* Common case: use autoremove_wake_function() as the wake callback. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1208  
/*
 * Runtime equivalent of DEFINE_WAIT() for a caller-provided entry;
 * additionally clears ->flags explicitly.
 */
#define init_wait(wait)								\
	do {									\
		(wait)->private = current;					\
		(wait)->func = autoremove_wake_function;			\
		INIT_LIST_HEAD(&(wait)->entry);					\
		(wait)->flags = 0;						\
	} while (0)
1216  
/* Callback type for task_call_func(): invoked as func(p, arg), returns an int result. */
typedef int (*task_call_f)(struct task_struct *p, void *arg);
/* NOTE(review): presumably calls @func on @p under appropriate task protection and returns its result; confirm at the definition. */
extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
1219  
1220  #endif /* _LINUX_WAIT_H */
1221