Release 4.12: include/linux/poll.h
#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
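As a worked example of the stack-sizing macros above (assuming a typical 64-bit build where struct poll_table_entry, defined further down, packs to 64 bytes: an 8-byte filp, 8-byte key, 40-byte wait_queue_t, and 8-byte wait_address):

/* Illustrative only; the 64-byte entry size is an assumption, not taken
 * from this header:
 *   WQUEUES_STACK_ALLOC   = 832 - 256 = 576 bytes
 *   N_INLINE_POLL_ENTRIES = 576 / 64  = 9 entries
 * i.e. roughly nine wait-queue registrations fit on the stack before
 * poll/select must fall back to allocating a poll_table_page.
 */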
struct poll_table_struct;
/*
* structures and helpers for f_op->poll implementations
*/
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
/*
* Do not touch the structure directly, use the access functions
* poll_does_not_wait() and poll_requested_events() instead.
*/
typedef struct poll_table_struct {
        poll_queue_proc _qproc;
        unsigned long _key;
} poll_table;
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
        if (p && p->_qproc && wait_address)
                p->_qproc(filp, wait_address, p);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| --- | --- | --- | --- | --- |
| Linus Torvalds (pre-git) | 33 | 80.49% | 5 | 62.50% |
| Hans Verkuil | 5 | 12.20% | 1 | 12.50% |
| Davide Libenzi | 2 | 4.88% | 1 | 12.50% |
| Linus Torvalds | 1 | 2.44% | 1 | 12.50% |
| Total | 41 | 100.00% | 8 | 100.00% |
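poll_wait() is the helper a driver's f_op->poll implementation builds on. A minimal sketch of the canonical pattern follows; the device type and ready-check helpers (mydev, mydev_readable, mydev_writable) are hypothetical, not part of this header:

static unsigned int mydev_poll(struct file *filp, poll_table *wait)
{
        struct mydev *dev = filp->private_data;  /* hypothetical */
        unsigned int mask = 0;

        /* Register on the wait queue first (this may be a no-op, see
         * poll_does_not_wait() below), then report what is ready now. */
        poll_wait(filp, &dev->wq, wait);

        if (mydev_readable(dev))                 /* hypothetical helper */
                mask |= POLLIN | POLLRDNORM;
        if (mydev_writable(dev))                 /* hypothetical helper */
                mask |= POLLOUT | POLLWRNORM;
        return mask;
}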
/*
* Return true if it is guaranteed that poll will not wait. This is the case
* if the poll() of another file descriptor in the set got an event, so there
* is no need for waiting.
*/
static inline bool poll_does_not_wait(const poll_table *p)
{
        return p == NULL || p->_qproc == NULL;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| --- | --- | --- | --- | --- |
| Hans Verkuil | 23 | 100.00% | 1 | 100.00% |
| Total | 23 | 100.00% | 1 | 100.00% |
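A driver can use this to skip work that only matters when the caller might actually sleep. In the sketch below, everything except poll_does_not_wait() and poll_wait() is hypothetical:

static unsigned int fastdev_poll(struct file *filp, poll_table *wait)
{
        struct fastdev *dev = filp->private_data;  /* hypothetical */

        /* poll() of another fd in the set already found an event, so
         * this call cannot block: report current readiness and skip
         * any costly preparation that only matters for waiting. */
        if (poll_does_not_wait(wait))
                return fastdev_ready_mask(dev);    /* hypothetical */

        fastdev_prepare_wait(dev);                 /* hypothetical, expensive */
        poll_wait(filp, &dev->wq, wait);
        return fastdev_ready_mask(dev);
}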
/*
* Return the set of events that the application wants to poll for.
* This is useful for drivers that need to know whether a DMA transfer has
* to be started implicitly on poll(). You typically only want to do that
* if the application is actually polling for POLLIN and/or POLLOUT.
*/
static inline unsigned long poll_requested_events(const poll_table *p)
{
        return p ? p->_key : ~0UL;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| --- | --- | --- | --- | --- |
| Hans Verkuil | 23 | 100.00% | 1 | 100.00% |
| Total | 23 | 100.00% | 1 | 100.00% |
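A sketch of the DMA use case the comment above describes, loosely modeled on a capture driver; the device type and helpers (cap_dev, cap_start_dma_if_idle, cap_frame_ready) are hypothetical:

static unsigned int cap_poll(struct file *filp, poll_table *wait)
{
        struct cap_dev *cap = filp->private_data;  /* hypothetical */

        /* Only start a capture transfer when the caller actually
         * asked about readable data. */
        if (poll_requested_events(wait) & (POLLIN | POLLRDNORM))
                cap_start_dma_if_idle(cap);        /* hypothetical */

        poll_wait(filp, &cap->frame_wq, wait);
        return cap_frame_ready(cap) ? (POLLIN | POLLRDNORM) : 0;
}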
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
        pt->_qproc = qproc;
        pt->_key   = ~0UL; /* all events enabled */
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| --- | --- | --- | --- | --- |
| Davide Libenzi | 17 | 60.71% | 2 | 40.00% |
| Eric Dumazet | 7 | 25.00% | 1 | 20.00% |
| Manfred Spraul | 2 | 7.14% | 1 | 20.00% |
| Hans Verkuil | 2 | 7.14% | 1 | 20.00% |
| Total | 28 | 100.00% | 5 | 100.00% |
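Callers that drive f_op->poll themselves (the select/poll core, epoll) initialize a poll_table this way before their first pass over the descriptors. The callback and wrapper below are a hypothetical sketch of that pattern, assuming a file whose f_op->poll is non-NULL:

/* Hypothetical callback: record (filp, whead) in caller-owned state so
 * the caller can add itself to the queue and detach again on cleanup. */
static void my_queue_proc(struct file *filp, wait_queue_head_t *whead,
                          poll_table *pt)
{
        /* ... remember filp/whead for later waiting and teardown ... */
}

static unsigned int probe_events(struct file *filp)
{
        poll_table pt;

        init_poll_funcptr(&pt, my_queue_proc); /* _key = ~0UL: all events */
        return filp->f_op->poll(filp, &pt);
}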
struct poll_table_entry {
        struct file *filp;
        unsigned long key;
        wait_queue_t wait;
        wait_queue_head_t *wait_address;
};
/*
* Structures and helpers for select/poll syscall
*/
struct poll_wqueues {
        poll_table pt;
        struct poll_table_page *table;
        struct task_struct *polling_task;
        int triggered;
        int error;
        int inline_index;
        struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};
extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
                                 ktime_t *expires, unsigned long slack);
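Taken together, these helpers implement the wait loop at the heart of fs/select.c. The condensed sketch below is not the exact kernel code; the fd-scanning step is elided and the function name is hypothetical:

static int poll_loop_sketch(ktime_t *end_time, unsigned long slack)
{
        struct poll_wqueues table;
        int count = 0, timed_out = 0;

        poll_initwait(&table);
        for (;;) {
                /* ... call f_op->poll on each fd with &table.pt, counting
                 * ready descriptors in count; after the first full pass
                 * the core clears table.pt._qproc so each wait queue is
                 * only registered once ... */
                if (count || timed_out || table.error)
                        break;
                /* Sleep until a queued wait entry fires or the timeout
                 * expires; a zero return means the timeout expired. */
                if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
                                           end_time, slack))
                        timed_out = 1;
        }
        poll_freewait(&table); /* detach from every registered queue */
        return count ? count : table.error;
}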
extern u64 select_estimate_accuracy(struct timespec64 *tv);
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
                           fd_set __user *exp, struct timespec64 *end_time);
extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
                                   long nsec);
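A sketch of how a syscall front end feeds these two declarations (the wrapper name is hypothetical; the real callers live in fs/select.c): poll_select_set_timeout() validates a relative (sec, nsec) pair and converts it to an absolute end time, while a NULL end_time pointer means wait indefinitely.

static int select_sketch(int n, fd_set __user *inp, fd_set __user *outp,
                         fd_set __user *exp, struct timespec64 *tv)
{
        struct timespec64 end_time, *to = NULL;

        if (tv) { /* userspace supplied a timeout */
                to = &end_time;
                /* Rejects negative or out-of-range values; otherwise
                 * stores "now + timeout" in *to. */
                if (poll_select_set_timeout(to, tv->tv_sec, tv->tv_nsec))
                        return -EINVAL;
        }
        return core_sys_select(n, inp, outp, exp, to);
}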
#endif /* _LINUX_POLL_H */
Overall Contributors
| Person | Tokens | Token % | Commits | Commit % |
| --- | --- | --- | --- | --- |
| Linus Torvalds (pre-git) | 64 | 16.89% | 10 | 29.41% |
| Hans Verkuil | 58 | 15.30% | 1 | 2.94% |
| Andi Kleen | 52 | 13.72% | 1 | 2.94% |
| Davide Libenzi | 51 | 13.46% | 3 | 8.82% |
| Tejun Heo | 29 | 7.65% | 1 | 2.94% |
| Manfred Spraul | 26 | 6.86% | 1 | 2.94% |
| Al Viro | 25 | 6.60% | 2 | 5.88% |
| Eric Dumazet | 14 | 3.69% | 1 | 2.94% |
| Thomas Gleixner | 14 | 3.69% | 1 | 2.94% |
| Dave Young | 10 | 2.64% | 1 | 2.94% |
| Alexey Dobriyan | 8 | 2.11% | 2 | 5.88% |
| Shawn Bohrer | 8 | 2.11% | 1 | 2.94% |
| David Woodhouse | 4 | 1.06% | 1 | 2.94% |
| Deepa Dinamani | 4 | 1.06% | 1 | 2.94% |
| David Howells | 3 | 0.79% | 1 | 2.94% |
| Andrew Morton | 3 | 0.79% | 1 | 2.94% |
| Arjan van de Ven | 2 | 0.53% | 1 | 2.94% |
| Linus Torvalds | 2 | 0.53% | 2 | 5.88% |
| Namhyung Kim | 1 | 0.26% | 1 | 2.94% |
| John Stultz | 1 | 0.26% | 1 | 2.94% |
| Total | 379 | 100.00% | 34 | 100.00% |