From c4b0dd6b81b51026b6faa7301207a1d268cf353e Mon Sep 17 00:00:00 2001
From: leitner
Date: Fri, 12 Mar 2021 10:43:46 +0000
Subject: [PATCH] move cleanup after check that all events are handled

---
 io/io_waituntil2.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/io/io_waituntil2.c b/io/io_waituntil2.c
index 1850d55..a3a14b5 100644
--- a/io/io_waituntil2.c
+++ b/io/io_waituntil2.c
@@ -120,18 +120,7 @@ int64 io_waituntil2(int64 milliseconds) {
   struct pollfd* p;
 #endif
   long i,j,r;
-  if (first_deferred!=-1) {
-    while (first_deferred!=-1) {
-      io_entry* e=iarray_get(&io_fds,first_deferred);
-      if (e && e->closed) {
-        e->closed=0;
-        close(first_deferred);
-        first_deferred=e->next_defer;
-        e->next_defer=-1;
-      } else
-        first_deferred=-1;  // can't happen
-    }
-  }
+
   /* if no interest in events has been registered, then return
    * immediately */
   if (!io_wanted_fds) return 0;
@@ -139,6 +128,22 @@ int64 io_waituntil2(int64 milliseconds) {
   /* only actually wait if all previous events have been dequeued */
   if (first_readable!=-1 || first_writeable!=-1)
     return 0;
+
+  /* There is a race if we get events on a socket, and someone calls
+   * io_close on the fd before they are handled. Those events are in a
+   * queue. So we try to detect if there are still queued events in
+   * io_close and then not actually close the descriptor but set
+   * e->closed so we can clean up the descriptor here. */
+  while (first_deferred!=-1) {
+    io_entry* e=iarray_get(&io_fds,first_deferred);
+    if (e && e->closed) {
+      e->closed=0;
+      close(first_deferred);
+      first_deferred=e->next_defer;
+      e->next_defer=-1;
+    } else
+      first_deferred=-1;  // can't happen
+  }
+
 #ifdef HAVE_EPOLL
   if (io_waitmode==EPOLL) {
     int n;
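
Note (not part of the patch): the hunks above only show the draining side of
the deferred-close scheme. Below is a minimal compilable sketch of what the
io_close side could look like under this scheme. It assumes a simplified
fixed-size fd table in place of libowfat's iarray (io_fds), a stand-in
events_still_queued() predicate for the real readable/writeable queue check,
and a hypothetical function name my_io_close; only the
first_deferred / e->closed / e->next_defer protocol mirrors the code in the
diff.

#include <unistd.h>

typedef struct io_entry {
  int closed;      /* close this fd once all queued events are drained */
  long next_defer; /* next fd in the deferred-close list, -1 = end */
} io_entry;

#define MAXFDS 1024
static io_entry io_table[MAXFDS]; /* simplified stand-in for iarray io_fds */
static long first_deferred=-1;    /* head of the deferred-close list */

/* stand-in for "this fd still has entries in the readable/writeable
 * event queues"; the real check would consult those queues */
static int events_still_queued(long fd) { (void)fd; return 0; }

/* hypothetical name; sketches the io_close side of the scheme */
void my_io_close(long fd) {
  io_entry* e=&io_table[fd];
  if (events_still_queued(fd)) {
    /* someone may still dequeue an event for this fd: do not close it
     * now, just mark it and link it into the deferred-close list that
     * the loop added by this patch drains in io_waituntil2 */
    e->closed=1;
    e->next_defer=first_deferred;
    first_deferred=fd;
  } else
    close(fd); /* no queued events reference this fd: close immediately */
}

The point of the patch is ordering: the drain loop is moved below the
first_readable/first_writeable check, so deferred descriptors are only
closed once all previously queued events have been dequeued and no queued
event can still reference the fd being closed.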