VirtualBox

source: vbox/trunk/src/VBox/NetworkServices/NAT/proxy_pollmgr.c@ 63275

Last change on this file since 63275 was 63275, checked in by vboxsync, 8 years ago

NetworkServices: warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 17.9 KB
Line 
1/* $Id: proxy_pollmgr.c 63275 2016-08-10 14:24:44Z vboxsync $ */
2/** @file
3 * NAT Network - poll manager.
4 */
5
6/*
7 * Copyright (C) 2013-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#define LOG_GROUP LOG_GROUP_NAT_SERVICE
19
20#include "winutils.h"
21
22#include "proxy_pollmgr.h"
23#include "proxy.h"
24
25#ifndef RT_OS_WINDOWS
26#include <sys/socket.h>
27#include <netinet/in.h>
28#include <err.h>
29#include <errno.h>
30#include <poll.h>
31#include <stdio.h>
32#include <stdlib.h>
33#include <string.h>
34#include <time.h>
35#include <unistd.h>
36#else
37#include <iprt/err.h>
38#include <stdlib.h>
39#include <string.h>
40#include "winpoll.h"
41#endif
42
#define POLLMGR_GARBAGE (-1)  /* sentinel stored in pollfd::events to mark a
                               * slot queued for garbage collection; see the
                               * g/c pass at the end of pollmgr_loop() */

/*
 * Global poll manager state (single instance).  The fds/handlers
 * arrays are parallel: handlers[i] is the callback for fds[i].  Only
 * the pollmgr thread touches them, so no locking is used.
 */
struct pollmgr {
    struct pollfd *fds;               /* array passed to poll(2) */
    struct pollmgr_handler **handlers; /* per-slot handler, parallel to fds */
    nfds_t capacity;            /* allocated size of the arrays */
    nfds_t nfds;                /* part of the arrays in use */

    /* channels (socketpair) for static slots */
    SOCKET chan[POLLMGR_SLOT_STATIC_COUNT][2];
#define POLLMGR_CHFD_RD 0       /* - pollmgr side */
#define POLLMGR_CHFD_WR 1       /* - client side */
} pollmgr;
56
57
/* forward declarations for internal helpers */
static void pollmgr_loop(void);

static void pollmgr_add_at(int, struct pollmgr_handler *, SOCKET, int);
static void pollmgr_refptr_delete(struct pollmgr_refptr *);


/*
 * We cannot portably peek at the length of the incoming datagram and
 * pre-allocate pbuf chain to recvmsg() directly to it.  On Linux it's
 * possible to recv with MSG_PEEK|MSG_TRUNC, but an extra syscall is
 * probably more expensive (haven't measured) than doing an extra copy
 * of data, since typical UDP datagrams are small enough to avoid
 * fragmentation.
 *
 * We can use a shared buffer here since we read from sockets
 * sequentially in a loop over pollfd.
 */
u8_t pollmgr_udpbuf[64 * 1024];
76
77
78int
79pollmgr_init(void)
80{
81 struct pollfd *newfds;
82 struct pollmgr_handler **newhdls;
83 nfds_t newcap;
84 int status;
85 nfds_t i;
86
87 pollmgr.fds = NULL;
88 pollmgr.handlers = NULL;
89 pollmgr.capacity = 0;
90 pollmgr.nfds = 0;
91
92 for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
93 pollmgr.chan[i][POLLMGR_CHFD_RD] = INVALID_SOCKET;
94 pollmgr.chan[i][POLLMGR_CHFD_WR] = INVALID_SOCKET;
95 }
96
97 for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
98#ifndef RT_OS_WINDOWS
99 status = socketpair(PF_LOCAL, SOCK_DGRAM, 0, pollmgr.chan[i]);
100 if (status < 0) {
101 DPRINTF(("socketpair: %R[sockerr]\n", SOCKERRNO()));
102 goto cleanup_close;
103 }
104#else
105 status = RTWinSocketPair(PF_INET, SOCK_DGRAM, 0, pollmgr.chan[i]);
106 if (RT_FAILURE(status)) {
107 goto cleanup_close;
108 }
109#endif
110 }
111
112
113 newcap = 16; /* XXX: magic */
114 LWIP_ASSERT1(newcap >= POLLMGR_SLOT_STATIC_COUNT);
115
116 newfds = (struct pollfd *)
117 malloc(newcap * sizeof(*pollmgr.fds));
118 if (newfds == NULL) {
119 DPRINTF(("%s: Failed to allocate fds array\n", __func__));
120 goto cleanup_close;
121 }
122
123 newhdls = (struct pollmgr_handler **)
124 malloc(newcap * sizeof(*pollmgr.handlers));
125 if (newhdls == NULL) {
126 DPRINTF(("%s: Failed to allocate handlers array\n", __func__));
127 free(newfds);
128 goto cleanup_close;
129 }
130
131 pollmgr.capacity = newcap;
132 pollmgr.fds = newfds;
133 pollmgr.handlers = newhdls;
134
135 pollmgr.nfds = POLLMGR_SLOT_STATIC_COUNT;
136
137 for (i = 0; i < pollmgr.capacity; ++i) {
138 pollmgr.fds[i].fd = INVALID_SOCKET;
139 pollmgr.fds[i].events = 0;
140 pollmgr.fds[i].revents = 0;
141 }
142
143 return 0;
144
145 cleanup_close:
146 for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
147 SOCKET *chan = pollmgr.chan[i];
148 if (chan[POLLMGR_CHFD_RD] != INVALID_SOCKET) {
149 closesocket(chan[POLLMGR_CHFD_RD]);
150 closesocket(chan[POLLMGR_CHFD_WR]);
151 }
152 }
153
154 return -1;
155}
156
157
158/*
159 * Must be called before pollmgr loop is started, so no locking.
160 */
161SOCKET
162pollmgr_add_chan(int slot, struct pollmgr_handler *handler)
163{
164 if (slot >= POLLMGR_SLOT_FIRST_DYNAMIC) {
165 handler->slot = -1;
166 return INVALID_SOCKET;
167 }
168
169 pollmgr_add_at(slot, handler, pollmgr.chan[slot][POLLMGR_CHFD_RD], POLLIN);
170 return pollmgr.chan[slot][POLLMGR_CHFD_WR];
171}
172
173
/*
 * Register a handler in a new dynamic slot, growing the arrays by
 * doubling when full.
 *
 * Must be called from pollmgr loop (via callbacks), so no locking.
 *
 * Returns the new slot index, or -1 on allocation failure (in which
 * case handler->slot is set to -1 and nothing is registered).
 */
int
pollmgr_add(struct pollmgr_handler *handler, SOCKET fd, int events)
{
    int slot;

    DPRINTF2(("%s: new fd %d\n", __func__, fd));

    if (pollmgr.nfds == pollmgr.capacity) {
        struct pollfd *newfds;
        struct pollmgr_handler **newhdls;
        nfds_t newcap;
        nfds_t i;

        newcap = pollmgr.capacity * 2;

        newfds = (struct pollfd *)
            realloc(pollmgr.fds, newcap * sizeof(*pollmgr.fds));
        if (newfds == NULL) {
            DPRINTF(("%s: Failed to reallocate fds array\n", __func__));
            handler->slot = -1;
            return -1;
        }

        pollmgr.fds = newfds; /* don't crash/leak if realloc(handlers) fails */
        /* but don't update capacity yet! */

        newhdls = (struct pollmgr_handler **)
            realloc(pollmgr.handlers, newcap * sizeof(*pollmgr.handlers));
        if (newhdls == NULL) {
            DPRINTF(("%s: Failed to reallocate handlers array\n", __func__));
            /* if we failed to realloc here, then fds points to the
             * new (larger) array, but we pretend we still have the
             * old capacity; the extra fds space is simply unused
             * until a later grow attempt succeeds */
            handler->slot = -1;
            return -1;
        }

        pollmgr.handlers = newhdls;
        pollmgr.capacity = newcap;

        /* initialize the newly added tail of both arrays */
        for (i = pollmgr.nfds; i < newcap; ++i) {
            newfds[i].fd = INVALID_SOCKET;
            newfds[i].events = 0;
            newfds[i].revents = 0;
            newhdls[i] = NULL;
        }
    }

    /* take the first unused slot (arrays are kept compacted by the
     * g/c pass in pollmgr_loop) */
    slot = pollmgr.nfds;
    ++pollmgr.nfds;

    pollmgr_add_at(slot, handler, fd, events);
    return slot;
}
230
231
232static void
233pollmgr_add_at(int slot, struct pollmgr_handler *handler, SOCKET fd, int events)
234{
235 pollmgr.fds[slot].fd = fd;
236 pollmgr.fds[slot].events = events;
237 pollmgr.fds[slot].revents = 0;
238 pollmgr.handlers[slot] = handler;
239
240 handler->slot = slot;
241}
242
243
244ssize_t
245pollmgr_chan_send(int slot, void *buf, size_t nbytes)
246{
247 SOCKET fd;
248 ssize_t nsent;
249
250 if (slot >= POLLMGR_SLOT_FIRST_DYNAMIC) {
251 return -1;
252 }
253
254 fd = pollmgr.chan[slot][POLLMGR_CHFD_WR];
255 nsent = send(fd, buf, (int)nbytes, 0);
256 if (nsent == SOCKET_ERROR) {
257 DPRINTF(("send on chan %d: %R[sockerr]\n", slot, SOCKERRNO()));
258 return -1;
259 }
260 else if ((size_t)nsent != nbytes) {
261 DPRINTF(("send on chan %d: datagram truncated to %u bytes",
262 slot, (unsigned int)nsent));
263 return -1;
264 }
265
266 return nsent;
267}
268
269
/**
 * Receive a pointer sent over poll manager channel.
 *
 * Called from channel handlers on the pollmgr thread.  Any channel
 * failure is fatal: the channels are internal plumbing and must not
 * break, so we abort the process instead of trying to recover.
 */
void *
pollmgr_chan_recv_ptr(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    void *ptr;
    ssize_t nread;
    NOREF(handler); /* only used for diagnostics below */

    if (revents & POLLNVAL) {
        errx(EXIT_FAILURE, "chan %d: fd invalid", (int)handler->slot);
        /* NOTREACHED */
    }

    if (revents & (POLLERR | POLLHUP)) {
        errx(EXIT_FAILURE, "chan %d: fd error", (int)handler->slot);
        /* NOTREACHED */
    }

    LWIP_ASSERT1(revents & POLLIN);
    /* each send is one datagram carrying exactly one pointer */
    nread = recv(fd, (char *)&ptr, sizeof(ptr), 0);

    if (nread == SOCKET_ERROR) {
        err(EXIT_FAILURE, "chan %d: recv", (int)handler->slot);
        /* NOTREACHED */
    }
    if (nread != sizeof(ptr)) {
        errx(EXIT_FAILURE, "chan %d: recv: read %d bytes",
             (int)handler->slot, (int)nread);
        /* NOTREACHED */
    }

    return ptr;
}
305
306
307void
308pollmgr_update_events(int slot, int events)
309{
310 LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
311 LWIP_ASSERT1((nfds_t)slot < pollmgr.nfds);
312
313 pollmgr.fds[slot].events = events;
314}
315
316
317void
318pollmgr_del_slot(int slot)
319{
320 LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
321
322 DPRINTF2(("%s(%d): fd %d ! DELETED\n",
323 __func__, slot, pollmgr.fds[slot].fd));
324
325 pollmgr.fds[slot].fd = INVALID_SOCKET; /* see poll loop */
326}
327
328
/*
 * Poll manager thread entry point: runs the poll loop forever.
 */
void
pollmgr_thread(void *ignored)
{
    LWIP_UNUSED_ARG(ignored);
    pollmgr_loop();
}
335
336
/*
 * The poll manager main loop.
 *
 * Each iteration: poll(2) all registered fds with an infinite
 * timeout, dispatch ready events to slot handlers, then compact the
 * arrays by garbage-collecting slots whose handlers requested
 * deletion (callback returned a negative event mask, or
 * pollmgr_del_slot() was called).
 *
 * Never returns.
 */
static void
pollmgr_loop(void)
{
    int nready;
    SOCKET delfirst;            /* head of the intrusive deletion list */
    SOCKET *pdelprev;           /* where to store the next list link */
    int i;

    for (;;) {
#ifndef RT_OS_WINDOWS
        nready = poll(pollmgr.fds, pollmgr.nfds, -1);
#else
        int rc = RTWinPoll(pollmgr.fds, pollmgr.nfds,RT_INDEFINITE_WAIT, &nready);
        if (RT_FAILURE(rc)) {
            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED*/
        }
#endif

        DPRINTF2(("%s: ready %d fd%s\n",
                  __func__, nready, (nready == 1 ? "" : "s")));

        if (nready < 0) {
            if (errno == EINTR) {
                continue;       /* interrupted by a signal - just retry */
            }

            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED*/
        }
        else if (nready == 0) { /* cannot happen, we wait forever (-1) */
            continue;           /* - but be defensive */
        }


        delfirst = INVALID_SOCKET;
        pdelprev = &delfirst;

        /* dispatch events; stop early once all ready fds are handled */
        for (i = 0; (nfds_t)i < pollmgr.nfds && nready > 0; ++i) {
            struct pollmgr_handler *handler;
            SOCKET fd;
            int revents, nevents;

            fd = pollmgr.fds[i].fd;
            revents = pollmgr.fds[i].revents;

            /*
             * Channel handlers can request deletion of dynamic slots
             * by calling pollmgr_del_slot() that clobbers slot's fd.
             */
            if (fd == INVALID_SOCKET && i >= POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* adjust count if events were pending for that slot */
                if (revents != 0) {
                    --nready;
                }

                /* pretend that slot handler requested deletion */
                nevents = -1;
                goto update_events;
            }

            if (revents == 0) {
                continue; /* next fd */
            }
            --nready;

            handler = pollmgr.handlers[i];

            if (handler != NULL && handler->callback != NULL) {
#ifdef LWIP_PROXY_DEBUG
# if LWIP_PROXY_DEBUG /* DEBUG */
                if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                    if (revents == POLLIN) {
                        DPRINTF2(("%s: ch %d\n", __func__, i));
                    }
                    else {
                        DPRINTF2(("%s: ch %d @ revents 0x%x!\n",
                                  __func__, i, revents));
                    }
                }
                else {
                    DPRINTF2(("%s: fd %d @ revents 0x%x\n",
                              __func__, fd, revents));
                }
# endif /* LWIP_PROXY_DEBUG / DEBUG */
#endif
                /* callback returns the new event mask, or <0 to
                 * request deletion of the slot */
                nevents = (*handler->callback)(handler, fd, revents);
            }
            else {
                DPRINTF0(("%s: invalid handler for fd %d: ", __func__, fd));
                if (handler == NULL) {
                    DPRINTF0(("NULL\n"));
                }
                else {
                    DPRINTF0(("%p (callback = NULL)\n", (void *)handler));
                }
                nevents = -1; /* delete it */
            }

          update_events:
            if (nevents >= 0) {
                if (nevents != pollmgr.fds[i].events) {
                    DPRINTF2(("%s: fd %d ! nevents 0x%x\n",
                              __func__, fd, nevents));
                }
                pollmgr.fds[i].events = nevents;
            }
            else if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* Don't garbage-collect channels. */
                DPRINTF2(("%s: fd %d ! DELETED (channel %d)\n",
                          __func__, fd, i));
                pollmgr.fds[i].fd = INVALID_SOCKET;
                pollmgr.fds[i].events = 0;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
            else {
                DPRINTF2(("%s: fd %d ! DELETED\n", __func__, fd));

                /* schedule for deletion (see g/c loop for details) */
                *pdelprev = i;  /* make previous entry point to us */
                pdelprev = &pollmgr.fds[i].fd;

                pollmgr.fds[i].fd = INVALID_SOCKET; /* end of list (for now) */
                pollmgr.fds[i].events = POLLMGR_GARBAGE;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
        } /* processing loop */


        /*
         * Garbage collect and compact the array.
         *
         * We overload pollfd::fd of garbage entries to store the
         * index of the next garbage entry.  The garbage list is
         * co-directional with the fds array.  The index of the first
         * entry is in "delfirst", the last entry "points to"
         * INVALID_SOCKET.
         *
         * See update_events code for nevents < 0 at the end of the
         * processing loop above.
         *
         * Each iteration either drops a dead entry from the tail or
         * moves the live tail entry into the freed slot at the head
         * of the garbage list, so the loop terminates.
         */
        while (delfirst != INVALID_SOCKET) {
            const int last = pollmgr.nfds - 1;

            /*
             * We want a live entry in the last slot to swap into the
             * freed slot, so make sure we have one.
             */
            if (pollmgr.fds[last].events == POLLMGR_GARBAGE /* garbage */
                || pollmgr.fds[last].fd == INVALID_SOCKET) /* or killed */
            {
                /* drop garbage entry at the end of the array */
                --pollmgr.nfds;

                if (delfirst == last) {
                    /* congruent to delnext >= pollmgr.nfds test below */
                    delfirst = INVALID_SOCKET; /* done */
                }
            }
            else {
                const SOCKET delnext = pollmgr.fds[delfirst].fd;

                /* copy live entry at the end to the first slot being freed */
                pollmgr.fds[delfirst] = pollmgr.fds[last]; /* struct copy */
                pollmgr.handlers[delfirst] = pollmgr.handlers[last];
                pollmgr.handlers[delfirst]->slot = (int)delfirst;
                --pollmgr.nfds;

                if ((nfds_t)delnext >= pollmgr.nfds) {
                    delfirst = INVALID_SOCKET; /* done */
                }
                else {
                    delfirst = delnext;
                }
            }

            /* scrub the vacated tail slot in either case */
            pollmgr.fds[last].fd = INVALID_SOCKET;
            pollmgr.fds[last].events = 0;
            pollmgr.fds[last].revents = 0;
            pollmgr.handlers[last] = NULL;
        }
    } /* poll loop */
}
522
523
524/**
525 * Create strongly held refptr.
526 */
527struct pollmgr_refptr *
528pollmgr_refptr_create(struct pollmgr_handler *ptr)
529{
530 struct pollmgr_refptr *rp;
531
532 LWIP_ASSERT1(ptr != NULL);
533
534 rp = (struct pollmgr_refptr *)malloc(sizeof (*rp));
535 if (rp == NULL) {
536 return NULL;
537 }
538
539 sys_mutex_new(&rp->lock);
540 rp->ptr = ptr;
541 rp->strong = 1;
542 rp->weak = 0;
543
544 return rp;
545}
546
547
548static void
549pollmgr_refptr_delete(struct pollmgr_refptr *rp)
550{
551 if (rp == NULL) {
552 return;
553 }
554
555 LWIP_ASSERT1(rp->strong == 0);
556 LWIP_ASSERT1(rp->weak == 0);
557
558 sys_mutex_free(&rp->lock);
559 free(rp);
560}
561
562
563/**
564 * Add weak reference before "rp" is sent over a poll manager channel.
565 */
566void
567pollmgr_refptr_weak_ref(struct pollmgr_refptr *rp)
568{
569 sys_mutex_lock(&rp->lock);
570
571 LWIP_ASSERT1(rp->ptr != NULL);
572 LWIP_ASSERT1(rp->strong > 0);
573
574 ++rp->weak;
575
576 sys_mutex_unlock(&rp->lock);
577}
578
579
/**
 * Try to get the pointer from implicitly weak reference we've got
 * from a channel.
 *
 * Consumes one weak reference (and may delete the refptr if it was
 * the last reference of any kind).
 *
 * If we detect that the object is still strongly referenced, but no
 * longer registered with the poll manager we abort strengthening
 * conversion here b/c lwip thread callback is already scheduled to
 * destruct the object.
 *
 * @return the referenced handler, or NULL if the object is already
 *         destroyed or about to be destroyed.
 */
struct pollmgr_handler *
pollmgr_refptr_get(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *handler;
    size_t weak;

    sys_mutex_lock(&rp->lock);

    /* we hold the weak reference that was taken before the send */
    LWIP_ASSERT1(rp->weak > 0);
    weak = --rp->weak;

    handler = rp->ptr;
    if (handler == NULL) {
        /* object already destroyed; we may be the last reference */
        LWIP_ASSERT1(rp->strong == 0);
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
        return NULL;
    }

    LWIP_ASSERT1(rp->strong == 1);

    /*
     * Here we would do:
     *
     *   ++rp->strong;
     *
     * and then, after channel handler is done, we would decrement it
     * back.
     *
     * Instead we check that the object is still registered with poll
     * manager.  If it is, there's no race with lwip thread trying to
     * drop its strong reference, as lwip thread callback to destruct
     * the object is always scheduled by its poll manager callback.
     *
     * Conversely, if we detect that the object is no longer
     * registered with poll manager, we immediately abort.  Since
     * channel handler can't do anything useful anyway and would have
     * to return immediately.
     *
     * Since channel handler would always find rp->strong as it had
     * left it, just elide extra strong reference creation to avoid
     * the whole back-and-forth.
     */

    if (handler->slot < 0) { /* no longer polling */
        sys_mutex_unlock(&rp->lock);
        return NULL;
    }

    sys_mutex_unlock(&rp->lock);
    return handler;
}
643
644
645/**
646 * Remove (the only) strong reference.
647 *
648 * If it were real strong/weak pointers, we should also call
649 * destructor for the referenced object, but
650 */
651void
652pollmgr_refptr_unref(struct pollmgr_refptr *rp)
653{
654 sys_mutex_lock(&rp->lock);
655
656 LWIP_ASSERT1(rp->strong == 1);
657 --rp->strong;
658
659 if (rp->strong > 0) {
660 sys_mutex_unlock(&rp->lock);
661 }
662 else {
663 size_t weak;
664
665 /* void *ptr = rp->ptr; */
666 rp->ptr = NULL;
667
668 /* delete ptr; // see doc comment */
669
670 weak = rp->weak;
671 sys_mutex_unlock(&rp->lock);
672 if (weak == 0) {
673 pollmgr_refptr_delete(rp);
674 }
675 }
676}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette