+ if(io_tree.tail) {
+ io_t *last = io_tree.tail->data;
+ fds = last->fd + 1;
+ }
+
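+ /* Wait until a registered fd becomes readable or writable, or until the timeout (if any) expires. */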
+ int n = select(fds, &readable, &writable, NULL, tv);
+
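+ /* Retry on transient errors; any other select() failure aborts the event loop. */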
+ if(n < 0) {
+ if(sockwouldblock(sockerrno))
+ continue;
+ else
+ return false;
+ }
+
+ if(!n)
+ continue;
+
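+ /* Handle at most one ready fd per pass, giving write events priority over read events. */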
+ for splay_each(io_t, io, &io_tree) {
+ if(FD_ISSET(io->fd, &writable))
+ io->cb(io->data, IO_WRITE);
+ else if(FD_ISSET(io->fd, &readable))
+ io->cb(io->data, IO_READ);
+ else
+ continue;
+
+ /*
+ There are scenarios in which the callback will remove another io_t from the tree
+ (e.g. closing a double connection). splay_each does not support removing other
+ nodes while iterating, so we have to exit the loop now. That's okay, since any
+ remaining events will get picked up by the next select() call.
+ */
+ break;
+ }
+ }
+#else
+ while(running) {
+ struct timeval diff;
+ struct timeval *tv = get_time_remaining(&diff);
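+ /* Convert the remaining time to whole milliseconds, rounding up; if no timeout is pending, wait indefinitely. */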
+ DWORD timeout_ms = tv ? (tv->tv_sec * 1000 + tv->tv_usec / 1000 + 1) : WSA_INFINITE;
+
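+ /* With no I/O events registered there is nothing to wait on, so just sleep until the next timer expires. */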
+ if(!event_count) {
+ Sleep(timeout_ms);
+ continue;
+ }
+
+ /*
+ For some reason, Microsoft decided to make the FD_WRITE event edge-triggered instead of level-triggered,
+ which is the opposite of what select() does. In practice, that means that once an FD_WRITE event has fired,
+ it will not fire again until a send() returns EWOULDBLOCK. Since the semantics of this event loop are
+ that write events are level-triggered (i.e. they keep firing as long as the socket is writable), we need
+ to emulate this by firing the IO_WRITE callback for every socket that is still writable.
+
+ Note that technically FD_CLOSE has the same problem, but that's okay because user code does not rely on
+ this event being fired again if it is ignored.
+ */
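+ /* A zero-byte send() acts as a writability probe: if it succeeds, the socket can still accept data and its IO_WRITE callback should fire again. */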
+ io_t *writeable_io = NULL;
+ for splay_each(io_t, io, &io_tree)
+ if((io->flags & IO_WRITE) && send(io->fd, NULL, 0, 0) == 0) {
+ writeable_io = io;
+ break;
+ }
+ if(writeable_io) {
+ writeable_io->cb(writeable_io->data, IO_WRITE);
+ continue;
+ }