/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 * COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 * flag set in its personality we do *not* modify the given timeout
 * parameter to reflect time remaining.
 *
 * Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 * of fds to overcome the nfds < 16390 descriptors limit (Tigran Aivazian).
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <net/ll_poll.h>

#include <asm/uaccess.h>
/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)
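
/*
 * Worked example (illustrative, not from the original source): with the
 * default divfactor of 1000 below, a 10 s timeout yields 10 ms of slack;
 * for a "nice" task divfactor drops to 200, so the same timeout yields
 * 50 ms. Any timeout beyond roughly 100 s simply hits the MAX_SLACK cap:
 *
 *	slack  = tv->tv_nsec / divfactor;
 *	slack += tv->tv_sec * (NSEC_PER_SEC / divfactor);
 *	slack  = min(slack, MAX_SLACK);
 */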
static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}
long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */
	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}
struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
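
/*
 * Illustrative reading of the macro (assuming nothing beyond what is
 * visible here): each poll_table_page occupies exactly one page, so the
 * table is "full" once the next poll_table_entry would start past
 * PAGE_SIZE bytes from the start of the page. Roughly
 * (PAGE_SIZE - sizeof(struct poll_table_page)) / sizeof(struct poll_table_entry)
 * entries fit per page; the exact count depends on the architecture's
 * structure sizes.
 */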
/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
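
/*
 * A minimal sketch of the driver side (hypothetical mydev_* names, not
 * part of this file): a character device's ->poll() method calls
 * poll_wait() on each wait queue it may later wake, then returns the
 * currently-ready event mask.
 *
 *	static unsigned int mydev_poll(struct file *file, poll_table *wait)
 *	{
 *		struct mydev *dev = file->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &dev->read_wq, wait);
 *		if (mydev_data_ready(dev))
 *			mask |= POLLIN | POLLRDNORM;
 *		return mask;
 *	}
 *
 * poll_wait() is a no-op when the table's _qproc is NULL, which is how
 * the loops below avoid re-registering waiters on every iteration.
 */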
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);
static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;

	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);
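
/*
 * Typical calling sequence (a sketch of how do_select()/do_poll() below
 * use these helpers, not an additional API):
 *
 *	struct poll_wqueues table;
 *
 *	poll_initwait(&table);
 *	... call each file's ->poll() with &table.pt, sleep if no events ...
 *	poll_freewait(&table);
 */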
static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}
static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions. The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync. @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}
static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}
/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}
int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes. First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration. Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
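
/*
 * The barrier pairing above, as a sketch (illustrative):
 *
 *	waker (pollwake)		sleeper (poll_schedule_timeout)
 *	----------------		-------------------------------
 *	make event data visible		set_mb(pwq->triggered, 0)
 *	smp_wmb()			re-check all fds for events
 *	pwq->triggered = 1		if (!pwq->triggered)
 *	wake polling_task			schedule_hrtimeout_range(...)
 *
 * If the wakeup slips in after the re-check, triggered is already 1 and
 * the sleeper skips the schedule instead of missing the event.
 */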
/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
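
/*
 * Example use (illustrative): converting a 2.5 s relative timeout into
 * the absolute expiry that the wait loops below compare against.
 *
 *	struct timespec end_time;
 *
 *	if (poll_select_set_timeout(&end_time, 2, 500 * NSEC_PER_MSEC))
 *		return -EINVAL;		(caller passed a non-normalized value)
 */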
static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}
#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))
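
/*
 * Illustrative arithmetic: on a 64-bit arch (BITS_PER_LONG == 64), file
 * descriptor 68 lives in long-word n = 68 / 64 = 1 at bit 68 % 64 = 4,
 * so BITS(fds, 1) has bit 4 set if fd 68 appears in any of the three
 * input sets.
 */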
static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}
#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)
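
/*
 * Consequence of the definitions above (illustrative): an fd whose
 * ->poll() reports POLLERR or POLLHUP is flagged both readable and
 * writable for select(2), so the caller's next read()/write() picks up
 * the error or EOF without blocking; only POLLPRI (e.g. TCP urgent
 * data) shows up in the "exceptional" set.
 */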
static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;
	unsigned int ll_flag = ll_get_flag();
	u64 ll_time = ll_end_time();

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_ll = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;

				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;

					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, ll_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval)
						ll_flag = 0;
					else if (mask & POLL_LL)
						can_ll = true;
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if on, have sockets with POLL_LL and not out of time */
		if (ll_flag && can_ll && can_poll_ll(ll_time))
			continue;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}
/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fd sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}
SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}
/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
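
/*
 * Shape of that sixth argument as user space would build it (a sketch;
 * glibc constructs the equivalent internally):
 *
 *	struct {
 *		const sigset_t *ss;
 *		size_t ss_len;
 *	} sig6 = { &mask, sizeof(mask) };
 *
 *	syscall(__NR_pselect6, nfds, &rfds, NULL, NULL, &ts, &sig6);
 */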
#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif
struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
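
/*
 * Illustrative capacity: on a 64-bit arch with 4 KiB pages,
 * sizeof(struct poll_list) is 16 and sizeof(struct pollfd) is 8, so
 * POLLFD_PER_PAGE works out to (4096 - 16) / 8 = 510 pollfds per
 * chained allocation.
 */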
/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_ll, unsigned int ll_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op && f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= ll_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & POLL_LL)
					*can_ll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}
static int do_poll(unsigned int nfds, struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;
	unsigned int ll_flag = ll_get_flag();
	u64 ll_time = ll_end_time();

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_ll = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_ll, ll_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					ll_flag = 0;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if on, have sockets with POLL_LL and not out of time */
		if (ll_flag && can_ll && can_poll_ll(ll_time))
			continue;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}
#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
			sizeof(struct pollfd))
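
/*
 * Illustrative: with the usual POLL_STACK_ALLOC of 256 bytes, stack_pps
 * holds one poll_list header plus (256 - 16) / 8 = 30 pollfds on a
 * 64-bit arch, so small poll() calls avoid kmalloc() entirely.
 */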
int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;

		walk = pos->next;
		kfree(pos);
	}

	return err;
}
static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
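
/*
 * User-space view (illustrative): ppoll() below exists to close the race
 * between unblocking a signal and blocking in poll(). The classic pattern,
 * per the man page:
 *
 *	sigset_t orig;			(SIGINT blocked during normal work)
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	... check a "SIGINT seen" flag here ...
 *	ready = ppoll(fds, nfds, &ts, &orig);
 *
 * The mask swap and the sleep are atomic inside the kernel, so a SIGINT
 * arriving between the flag check and the call still interrupts ppoll().
 */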
SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);