/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H


#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
#include <uapi/linux/eventpoll.h>
14  
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC	256
#define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
/* Remainder of the stack budget goes to the inline poll wait-queue entries. */
#define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

/* Events reported for files whose f_op->poll is NULL (always ready). */
#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
25  
26  struct poll_table_struct;
27  
28  /*
29   * structures and helpers for f_op->poll implementations
30   */
31  typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
32  
33  /*
34   * Do not touch the structure directly, use the access functions
35   * poll_does_not_wait() and poll_requested_events() instead.
36   */
37  typedef struct poll_table_struct {
38  	poll_queue_proc _qproc;
39  	__poll_t _key;
40  } poll_table;
41  
/*
 * Add @wait_address to the poll table: forwards to the table's queueing
 * callback.  Safe to call with a NULL table, NULL callback, or NULL
 * wait queue head — all are no-ops.
 */
static inline void poll_wait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && p->_qproc && wait_address)
		p->_qproc(filp, wait_address, p);
}
47  
48  /*
49   * Return true if it is guaranteed that poll will not wait. This is the case
50   * if the poll() of another file descriptor in the set got an event, so there
51   * is no need for waiting.
52   */
poll_does_not_wait(const poll_table * p)53  static inline bool poll_does_not_wait(const poll_table *p)
54  {
55  	return p == NULL || p->_qproc == NULL;
56  }
57  
58  /*
59   * Return the set of events that the application wants to poll for.
60   * This is useful for drivers that need to know whether a DMA transfer has
61   * to be started implicitly on poll(). You typically only want to do that
62   * if the application is actually polling for POLLIN and/or POLLOUT.
63   */
poll_requested_events(const poll_table * p)64  static inline __poll_t poll_requested_events(const poll_table *p)
65  {
66  	return p ? p->_key : ~(__poll_t)0;
67  }
68  
/* Initialize a poll table with the given queueing callback and all events enabled. */
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->_qproc = qproc;
	pt->_key   = ~(__poll_t)0; /* all events enabled */
}
74  
file_can_poll(struct file * file)75  static inline bool file_can_poll(struct file *file)
76  {
77  	return file->f_op->poll;
78  }
79  
vfs_poll(struct file * file,struct poll_table_struct * pt)80  static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
81  {
82  	if (unlikely(!file->f_op->poll))
83  		return DEFAULT_POLLMASK;
84  	return file->f_op->poll(file, pt);
85  }
86  
87  struct poll_table_entry {
88  	struct file *filp;
89  	__poll_t key;
90  	wait_queue_entry_t wait;
91  	wait_queue_head_t *wait_address;
92  };
93  
94  /*
95   * Structures and helpers for select/poll syscall
96   */
97  struct poll_wqueues {
98  	poll_table pt;
99  	struct poll_table_page *table;
100  	struct task_struct *polling_task;
101  	int triggered;
102  	int error;
103  	int inline_index;
104  	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
105  };
106  
107  extern void poll_initwait(struct poll_wqueues *pwq);
108  extern void poll_freewait(struct poll_wqueues *pwq);
109  extern u64 select_estimate_accuracy(struct timespec64 *tv);
110  
111  #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
112  
113  extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
114  			   fd_set __user *exp, struct timespec64 *end_time);
115  
116  extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
117  				   long nsec);
118  
119  #define __MAP(v, from, to) \
120  	(from < to ? (v & from) * (to/from) : (v & from) / (from/to))
121  
mangle_poll(__poll_t val)122  static inline __u16 mangle_poll(__poll_t val)
123  {
124  	__u16 v = (__force __u16)val;
125  #define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X)
126  	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
127  		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
128  		M(HUP) | M(RDHUP) | M(MSG);
129  #undef M
130  }
131  
demangle_poll(u16 val)132  static inline __poll_t demangle_poll(u16 val)
133  {
134  #define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X)
135  	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
136  		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
137  		M(HUP) | M(RDHUP) | M(MSG);
138  #undef M
139  }
140  #undef __MAP


#endif /* _LINUX_POLL_H */