Fix UBSAN warnings about conversions and overflows.
[tinc] / src / event.c
1 /*
2     event.c -- I/O, timeout and signal event handling
3     Copyright (C) 2012-2021 Guus Sliepen <guus@tinc-vpn.org>
4
5     This program is free software; you can redistribute it and/or modify
6     it under the terms of the GNU General Public License as published by
7     the Free Software Foundation; either version 2 of the License, or
8     (at your option) any later version.
9
10     This program is distributed in the hope that it will be useful,
11     but WITHOUT ANY WARRANTY; without even the implied warranty of
12     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13     GNU General Public License for more details.
14
15     You should have received a copy of the GNU General Public License along
16     with this program; if not, write to the Free Software Foundation, Inc.,
17     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
#include "system.h"

#include <errno.h>

#include "event.h"
#include "utils.h"
23
/* Cached wall-clock time; refreshed by get_time_remaining() each loop
   iteration and read lazily by timeout_set().  Non-static: shared with
   other modules — presumably declared in event.h; TODO confirm. */
struct timeval now;

#ifndef HAVE_MINGW
/* Master fd sets; io_set() keeps them in sync with registered io_t
   flags, and event_loop() copies them before each select(). */
static fd_set readfds;
static fd_set writefds;
#else
/* WSAEventSelect masks corresponding to IO_READ and IO_WRITE. */
static const long READ_EVENTS = FD_READ | FD_ACCEPT | FD_CLOSE;
static const long WRITE_EVENTS = FD_WRITE | FD_CONNECT;
/* Number of registered io_t objects (Windows build only). */
static DWORD event_count = 0;
#endif
/* Cleared by event_exit() to make event_loop() return. */
static bool running;
35
36 static int io_compare(const io_t *a, const io_t *b) {
37 #ifndef HAVE_MINGW
38         return a->fd - b->fd;
39 #else
40
41         if(a->event < b->event) {
42                 return -1;
43         }
44
45         if(a->event > b->event) {
46                 return 1;
47         }
48
49         return 0;
50 #endif
51 }
52
53 static int timeout_compare(const timeout_t *a, const timeout_t *b) {
54         struct timeval diff;
55         timersub(&a->tv, &b->tv, &diff);
56
57         if(diff.tv_sec < 0) {
58                 return -1;
59         }
60
61         if(diff.tv_sec > 0) {
62                 return 1;
63         }
64
65         if(diff.tv_usec < 0) {
66                 return -1;
67         }
68
69         if(diff.tv_usec > 0) {
70                 return 1;
71         }
72
73         if(a < b) {
74                 return -1;
75         }
76
77         if(a > b) {
78                 return 1;
79         }
80
81         return 0;
82 }
83
/* All registered I/O handlers, ordered by io_compare(). */
static splay_tree_t io_tree = {.compare = (splay_compare_t)io_compare};
/* All pending timeouts, ordered by expiry time (address as tie-break). */
static splay_tree_t timeout_tree = {.compare = (splay_compare_t)timeout_compare};
86
/* Register an I/O event handler for file descriptor fd.
 *
 * cb is invoked from event_loop() with `data` and an IO_READ/IO_WRITE
 * flag whenever fd becomes ready for the directions given in `flags`.
 * A second io_add() on an already-registered io (io->cb set) is a
 * silent no-op.  Aborts on resource exhaustion or duplicate key.
 */
void io_add(io_t *io, io_cb_t cb, void *data, int fd, int flags) {
	if(io->cb) {
		return;  /* already registered */
	}

	io->fd = fd;
#ifdef HAVE_MINGW

	/* fd == -1 means the caller supplied its own event object via
	   io_add_event(); don't overwrite it with a fresh one. */
	if(io->fd != -1) {
		io->event = WSACreateEvent();

		if(io->event == WSA_INVALID_EVENT) {
			abort();
		}
	}

	event_count++;
#endif
	io->cb = cb;
	io->data = data;
	io->node.data = io;

	io_set(io, flags);

	/* Insertion fails only if an equal key is already present. */
	if(!splay_insert_node(&io_tree, &io->node)) {
		abort();
	}
}
115
#ifdef HAVE_MINGW
/* Register a handler for a caller-supplied Windows event object.
   fd is passed as -1 so io_add() keeps this event instead of creating
   its own, and flags 0 so WSAEventSelect() is never called on it. */
void io_add_event(io_t *io, io_cb_t cb, void *data, WSAEVENT event) {
	io->event = event;
	io_add(io, cb, data, -1, 0);
}
#endif
122
/* Change the set of directions (IO_READ/IO_WRITE) an io is watched for.
 *
 * POSIX: updates the master fd_sets consulted by select().
 * Windows: reprograms WSAEventSelect() on the socket's event object.
 * A no-op when flags are unchanged, or (after recording the flags)
 * when the io has no real fd (io_add_event registrations).
 */
void io_set(io_t *io, int flags) {
	if(flags == io->flags) {
		return;
	}

	io->flags = flags;

	if(io->fd == -1) {
		return;
	}

#ifndef HAVE_MINGW

	if(flags & IO_READ) {
		FD_SET(io->fd, &readfds);
	} else {
		FD_CLR(io->fd, &readfds);
	}

	if(flags & IO_WRITE) {
		FD_SET(io->fd, &writefds);
	} else {
		FD_CLR(io->fd, &writefds);
	}

#else
	long events = 0;

	if(flags & IO_WRITE) {
		events |= WRITE_EVENTS;
	}

	if(flags & IO_READ) {
		events |= READ_EVENTS;
	}

	/* events == 0 cancels event reporting for this socket. */
	if(WSAEventSelect(io->fd, io->event, events) != 0) {
		abort();
	}

#endif
}
165
/* Unregister an I/O handler; a no-op if it was never registered.
 *
 * io_set(io, 0) must run first: it clears the fd from the master
 * fd_sets (or cancels WSAEventSelect) while the io's bookkeeping is
 * still intact.  Marks the io free for re-registration by clearing cb.
 */
void io_del(io_t *io) {
	if(!io->cb) {
		return;
	}

	io_set(io, 0);
#ifdef HAVE_MINGW

	/* Only close events we created ourselves in io_add(); fd == -1
	   events were supplied by the caller (io_add_event)... NOTE(review):
	   the condition actually skips closing for fd == -1 — caller keeps
	   ownership of that event.  event_count is decremented either way. */
	if(io->fd != -1 && WSACloseEvent(io->event) == FALSE) {
		abort();
	}

	event_count--;
#endif

	splay_unlink_node(&io_tree, &io->node);
	io->cb = NULL;
}
184
185 void timeout_add(timeout_t *timeout, timeout_cb_t cb, void *data, struct timeval *tv) {
186         timeout->cb = cb;
187         timeout->data = data;
188         timeout->node.data = timeout;
189
190         timeout_set(timeout, tv);
191 }
192
/* (Re-)arm a timeout to fire `tv` from the current time.
 *
 * The expiry time is the splay-tree key, so an already-armed timeout
 * (tv set) must be unlinked before the key is recomputed — hence the
 * strict unlink / timeradd / insert ordering below.
 */
void timeout_set(timeout_t *timeout, struct timeval *tv) {
	if(timerisset(&timeout->tv)) {
		splay_unlink_node(&timeout_tree, &timeout->node);
	}

	/* Lazily initialize the cached clock if the event loop has not
	   refreshed it yet (now.tv_sec == 0 only before the first tick). */
	if(!now.tv_sec) {
		gettimeofday(&now, NULL);
	}

	timeradd(&now, tv, &timeout->tv);

	if(!splay_insert_node(&timeout_tree, &timeout->node)) {
		abort();
	}
}
208
209 void timeout_del(timeout_t *timeout) {
210         if(!timeout->cb) {
211                 return;
212         }
213
214         splay_unlink_node(&timeout_tree, &timeout->node);
215         timeout->cb = 0;
216         timeout->tv = (struct timeval) {
217                 0, 0
218         };
219 }
220
221 #ifndef HAVE_MINGW
222 static int signal_compare(const signal_t *a, const signal_t *b) {
223         return a->signum - b->signum;
224 }
225
/* I/O handler watching the read end of the signal self-pipe. */
static io_t signalio;
/* Self-pipe: signal_handler() writes the signal number to pipefd[1],
   signalio_handler() reads it from pipefd[0] in the event loop. */
static int pipefd[2] = {-1, -1};
/* Registered signal handlers, keyed by signal number. */
static splay_tree_t signal_tree = {.compare = (splay_compare_t)signal_compare};
229
230 static void signal_handler(int signum) {
231         unsigned char num = signum;
232         write(pipefd[1], &num, 1);
233 }
234
235 static void signalio_handler(void *data, int flags) {
236         (void)data;
237         (void)flags;
238         unsigned char signum;
239
240         if(read(pipefd[0], &signum, 1) != 1) {
241                 return;
242         }
243
244         signal_t *sig = splay_search(&signal_tree, &((signal_t) {
245                 .signum = signum
246         }));
247
248         if(sig) {
249                 sig->cb(sig->data);
250         }
251 }
252
253 static void pipe_init(void) {
254         if(!pipe(pipefd)) {
255                 io_add(&signalio, signalio_handler, NULL, pipefd[0], IO_READ);
256         }
257 }
258
259 void signal_add(signal_t *sig, signal_cb_t cb, void *data, int signum) {
260         if(sig->cb) {
261                 return;
262         }
263
264         sig->cb = cb;
265         sig->data = data;
266         sig->signum = signum;
267         sig->node.data = sig;
268
269         if(pipefd[0] == -1) {
270                 pipe_init();
271         }
272
273         signal(sig->signum, signal_handler);
274
275         if(!splay_insert_node(&signal_tree, &sig->node)) {
276                 abort();
277         }
278 }
279
280 void signal_del(signal_t *sig) {
281         if(!sig->cb) {
282                 return;
283         }
284
285         signal(sig->signum, SIG_DFL);
286
287         splay_unlink_node(&signal_tree, &sig->node);
288         sig->cb = NULL;
289 }
290 #endif
291
/* Fire all expired timeouts and compute the wait until the next one.
 *
 * Refreshes the global `now`, then walks the timeout tree in expiry
 * order.  Returns `diff` filled with the interval until the earliest
 * still-pending timeout (suitable for select()/wait timeouts), or
 * NULL when no timeouts remain (wait indefinitely).
 */
static struct timeval *get_time_remaining(struct timeval *diff) {
	gettimeofday(&now, NULL);
	struct timeval *tv = NULL;

	while(timeout_tree.head) {
		/* Head is the earliest timeout (tree is ordered by expiry). */
		timeout_t *timeout = timeout_tree.head->data;
		timersub(&timeout->tv, &now, diff);

		if(diff->tv_sec < 0) {
			timeout->cb(timeout->data);

			/* The callback may have re-armed the timeout via
			   timeout_set(); only delete it if its expiry is still in
			   the past, otherwise leave it for a later iteration. */
			if(timercmp(&timeout->tv, &now, <)) {
				timeout_del(timeout);
			}
		} else {
			tv = diff;
			break;
		}
	}

	return tv;
}
314
/* Run the event loop until event_exit() is called.
 *
 * Each iteration fires expired timeouts (via get_time_remaining()),
 * then waits for I/O readiness — select() on POSIX,
 * WSAWaitForMultipleEvents() on Windows — and invokes the registered
 * callbacks.  Returns true on a clean exit, false on a fatal error
 * from the platform wait/enumeration calls.
 */
bool event_loop(void) {
	running = true;

#ifndef HAVE_MINGW
	fd_set readable;
	fd_set writable;

	while(running) {
		struct timeval diff;
		struct timeval *tv = get_time_remaining(&diff);
		/* Work on copies: select() mutates its fd_set arguments. */
		memcpy(&readable, &readfds, sizeof(readable));
		memcpy(&writable, &writefds, sizeof(writable));

		int fds = 0;

		/* io_tree is ordered by fd, so the tail holds the highest fd;
		   select() needs nfds = highest fd + 1. */
		if(io_tree.tail) {
			io_t *last = io_tree.tail->data;
			fds = last->fd + 1;
		}

		int n = select(fds, &readable, &writable, NULL, tv);

		if(n < 0) {
			/* Treat EINTR/EWOULDBLOCK-style errors as spurious wakeups. */
			if(sockwouldblock(sockerrno)) {
				continue;
			} else {
				return false;
			}
		}

		if(!n) {
			continue;  /* timeout only; loop to fire timeouts */
		}

		unsigned int curgen = io_tree.generation;

		for splay_each(io_t, io, &io_tree) {
			/* At most one callback per io per iteration; write has
			   priority over read. */
			if(FD_ISSET(io->fd, &writable)) {
				io->cb(io->data, IO_WRITE);
			} else if(FD_ISSET(io->fd, &readable)) {
				io->cb(io->data, IO_READ);
			} else {
				continue;
			}

			/*
			   There are scenarios in which the callback will remove another io_t from the tree
			   (e.g. closing a double connection). Since splay_each does not support that, we
			   need to exit the loop if that happens. That's okay, since any remaining events will
			   get picked up by the next select() call.
			 */
			if(curgen != io_tree.generation) {
				break;
			}
		}
	}

#else

	while(running) {
		struct timeval diff;
		struct timeval *tv = get_time_remaining(&diff);
		/* +1 ms rounds up so we never wake before the timeout expires. */
		DWORD timeout_ms = tv ? (DWORD)(tv->tv_sec * 1000 + tv->tv_usec / 1000 + 1) : WSA_INFINITE;

		if(!event_count) {
			/* Nothing to wait on; just sleep until the next timeout. */
			Sleep(timeout_ms);
			continue;
		}

		/*
		   For some reason, Microsoft decided to make the FD_WRITE event edge-triggered instead of level-triggered,
		   which is the opposite of what select() does. In practice, that means that if a FD_WRITE event triggers,
		   it will never trigger again until a send() returns EWOULDBLOCK. Since the semantics of this event loop
		   is that write events are level-triggered (i.e. they continue firing until the socket is full), we need
		   to emulate these semantics by making sure we fire each IO_WRITE that is still writeable.

		   Note that technically FD_CLOSE has the same problem, but it's okay because user code does not rely on
		   this event being fired again if ignored.
		*/
		unsigned int curgen = io_tree.generation;

		for splay_each(io_t, io, &io_tree) {
			/* A zero-byte send() succeeding means the socket is still
			   writable, so emulate a level-triggered write event. */
			if(io->flags & IO_WRITE && send(io->fd, NULL, 0, 0) == 0) {
				io->cb(io->data, IO_WRITE);

				if(curgen != io_tree.generation) {
					break;
				}
			}
		}

		/* WSAWaitForMultipleEvents cannot handle more than this many. */
		if(event_count > WSA_MAXIMUM_WAIT_EVENTS) {
			WSASetLastError(WSA_INVALID_PARAMETER);
			return(false);
		}

		WSAEVENT events[WSA_MAXIMUM_WAIT_EVENTS];
		io_t *io_map[WSA_MAXIMUM_WAIT_EVENTS];
		DWORD event_index = 0;

		/* Snapshot the tree into parallel event/io arrays. */
		for splay_each(io_t, io, &io_tree) {
			events[event_index] = io->event;
			io_map[event_index] = io;
			event_index++;
		}

		/*
		 * If the generation number changes due to event addition
		 * or removal by a callback we restart the loop.
		 */
		curgen = io_tree.generation;

		for(DWORD event_offset = 0; event_offset < event_count;) {
			DWORD result = WSAWaitForMultipleEvents(event_count - event_offset, &events[event_offset], FALSE, timeout_ms, FALSE);

			if(result == WSA_WAIT_TIMEOUT) {
				break;
			}

			if(result < WSA_WAIT_EVENT_0 || result >= WSA_WAIT_EVENT_0 + event_count - event_offset) {
				return false;
			}

			/* Look up io in the map by index. */
			event_index = result - WSA_WAIT_EVENT_0 + event_offset;
			io_t *io = io_map[event_index];

			if(io->fd == -1) {
				/* Caller-supplied event (io_add_event): no fd semantics. */
				io->cb(io->data, 0);

				if(curgen != io_tree.generation) {
					break;
				}
			} else {
				WSANETWORKEVENTS network_events;

				/* Also resets the event object's signaled state. */
				if(WSAEnumNetworkEvents(io->fd, io->event, &network_events) != 0) {
					return(false);
				}

				if(network_events.lNetworkEvents & READ_EVENTS) {
					io->cb(io->data, IO_READ);

					if(curgen != io_tree.generation) {
						break;
					}
				}

				/*
				    The fd might be available for write too. However, if we already fired the read callback, that
				    callback might have deleted the io (e.g. through terminate_connection()), so we can't fire the
				    write callback here. Instead, we loop back and let the writable io loop above handle it.
				 */
			}

			/* Continue checking the rest of the events. */
			event_offset = event_index + 1;

			/* Just poll the next time through. */
			timeout_ms = 0;
		}
	}

#endif

	return true;
}
482
/* Request event_loop() termination: the loop re-checks `running` at
   the top of each iteration, so exit happens after the current
   iteration's callbacks complete. */
void event_exit(void) {
	running = false;
}