VirtualBox

source: vbox/trunk/src/VBox/NetworkServices/NAT/proxy_pollmgr.c@94068

Last change on this file since 94068 was 93115, checked in by vboxsync, 3 years ago: scm --update-copyright-year

/* $Id: proxy_pollmgr.c 93115 2022-01-01 11:31:46Z vboxsync $ */
/** @file
 * NAT Network - poll manager.
 */

/*
 * Copyright (C) 2013-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#define LOG_GROUP LOG_GROUP_NAT_SERVICE

#include "winutils.h"

#include "proxy_pollmgr.h"
#include "proxy.h"

#ifndef RT_OS_WINDOWS
#include <sys/socket.h>
#include <netinet/in.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#else
#include <iprt/errcore.h>
#include <stdlib.h>
#include <string.h>
#include "winpoll.h"
#endif

#include <iprt/req.h>
#include <iprt/errcore.h>


#define POLLMGR_GARBAGE (-1)


enum {
    POLLMGR_QUEUE = 0,

    POLLMGR_SLOT_STATIC_COUNT,
    POLLMGR_SLOT_FIRST_DYNAMIC = POLLMGR_SLOT_STATIC_COUNT
};


struct pollmgr_chan {
    struct pollmgr_handler *handler;
    void *arg;
    bool arg_valid;
};

struct pollmgr {
    struct pollfd *fds;
    struct pollmgr_handler **handlers;
    nfds_t capacity; /* allocated size of the arrays */
    nfds_t nfds; /* part of the arrays in use */

    /* channels (socketpair) for static slots */
    SOCKET chan[POLLMGR_SLOT_STATIC_COUNT][2];
#define POLLMGR_CHFD_RD 0 /* - pollmgr side */
#define POLLMGR_CHFD_WR 1 /* - client side */


    /* emulate channels with request queue */
    RTREQQUEUE queue;
    struct pollmgr_handler queue_handler;
    struct pollmgr_chan chan_handlers[POLLMGR_CHAN_COUNT];
} pollmgr;


static int pollmgr_queue_callback(struct pollmgr_handler *, SOCKET, int);
static void pollmgr_chan_call_handler(int, void *);

static void pollmgr_loop(void);

static void pollmgr_add_at(int, struct pollmgr_handler *, SOCKET, int);
static void pollmgr_refptr_delete(struct pollmgr_refptr *);


/*
 * We cannot portably peek at the length of the incoming datagram and
 * pre-allocate a pbuf chain to recvmsg() directly into it. On Linux
 * it's possible to recv with MSG_PEEK|MSG_TRUNC, but the extra
 * syscall is probably more expensive (haven't measured) than doing an
 * extra copy of the data, since typical UDP datagrams are small
 * enough to avoid fragmentation.
 *
 * We can use a shared buffer here since we read from sockets
 * sequentially in a loop over pollfd.
 */
u8_t pollmgr_udpbuf[64 * 1024];


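/*
 * For illustration, a minimal sketch of the Linux-only MSG_PEEK|MSG_TRUNC
 * approach mentioned above (hypothetical helper, not used by this file):
 * with MSG_TRUNC the return value of recv() is the full datagram length
 * even though only one byte is copied out, and MSG_PEEK leaves the
 * datagram queued for the real recv() that follows.
 */
#if 0
static ssize_t
peek_datagram_size(SOCKET fd)
{
    char probe;
    return recv(fd, &probe, sizeof(probe), MSG_PEEK | MSG_TRUNC);
}
#endif

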
int
pollmgr_init(void)
{
    struct pollfd *newfds;
    struct pollmgr_handler **newhdls;
    nfds_t newcap;
    int rc, status;
    nfds_t i;

    rc = RTReqQueueCreate(&pollmgr.queue);
    if (RT_FAILURE(rc))
        return -1;

    pollmgr.fds = NULL;
    pollmgr.handlers = NULL;
    pollmgr.capacity = 0;
    pollmgr.nfds = 0;

    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
        pollmgr.chan[i][POLLMGR_CHFD_RD] = INVALID_SOCKET;
        pollmgr.chan[i][POLLMGR_CHFD_WR] = INVALID_SOCKET;
    }

    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
#ifndef RT_OS_WINDOWS
        int j;

        status = socketpair(PF_LOCAL, SOCK_DGRAM, 0, pollmgr.chan[i]);
        if (status < 0) {
            DPRINTF(("socketpair: %R[sockerr]\n", SOCKERRNO()));
            goto cleanup_close;
        }

        /* now manually make them O_NONBLOCK */
        for (j = 0; j < 2; ++j) {
            int s = pollmgr.chan[i][j];
            int sflags;

            sflags = fcntl(s, F_GETFL, 0);
            if (sflags < 0) {
                DPRINTF0(("F_GETFL: %R[sockerr]\n", errno));
                goto cleanup_close;
            }

            status = fcntl(s, F_SETFL, sflags | O_NONBLOCK);
            if (status < 0) {
                DPRINTF0(("O_NONBLOCK: %R[sockerr]\n", errno));
                goto cleanup_close;
            }
        }
#else
        status = RTWinSocketPair(PF_INET, SOCK_DGRAM, 0, pollmgr.chan[i]);
        if (RT_FAILURE(status)) {
            goto cleanup_close;
        }
#endif
    }


    newcap = 16; /* XXX: magic */
    LWIP_ASSERT1(newcap >= POLLMGR_SLOT_STATIC_COUNT);

    newfds = (struct pollfd *)
        malloc(newcap * sizeof(*pollmgr.fds));
    if (newfds == NULL) {
        DPRINTF(("%s: Failed to allocate fds array\n", __func__));
        goto cleanup_close;
    }

    newhdls = (struct pollmgr_handler **)
        malloc(newcap * sizeof(*pollmgr.handlers));
    if (newhdls == NULL) {
        DPRINTF(("%s: Failed to allocate handlers array\n", __func__));
        free(newfds);
        goto cleanup_close;
    }

    pollmgr.capacity = newcap;
    pollmgr.fds = newfds;
    pollmgr.handlers = newhdls;

    pollmgr.nfds = POLLMGR_SLOT_STATIC_COUNT;

    for (i = 0; i < pollmgr.capacity; ++i) {
        pollmgr.fds[i].fd = INVALID_SOCKET;
        pollmgr.fds[i].events = 0;
        pollmgr.fds[i].revents = 0;
    }

    /* add request queue notification */
    pollmgr.queue_handler.callback = pollmgr_queue_callback;
    pollmgr.queue_handler.data = NULL;
    pollmgr.queue_handler.slot = -1;

    pollmgr_add_at(POLLMGR_QUEUE, &pollmgr.queue_handler,
                   pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_RD],
                   POLLIN);

    return 0;

  cleanup_close:
    for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
        SOCKET *chan = pollmgr.chan[i];
        if (chan[POLLMGR_CHFD_RD] != INVALID_SOCKET) {
            closesocket(chan[POLLMGR_CHFD_RD]);
            closesocket(chan[POLLMGR_CHFD_WR]);
        }
    }

    return -1;
}


/*
 * Add a new channel. Channels are now implemented with a request
 * queue, so all channels get the same socket that triggers queue
 * processing.
 *
 * Must be called before the pollmgr loop is started, so no locking.
 */
SOCKET
pollmgr_add_chan(int slot, struct pollmgr_handler *handler)
{
    AssertReturn(0 <= slot && slot < POLLMGR_CHAN_COUNT, INVALID_SOCKET);
    AssertReturn(handler != NULL && handler->callback != NULL, INVALID_SOCKET);

    handler->slot = slot;
    pollmgr.chan_handlers[slot].handler = handler;
    return pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_WR];
}


/*
 * This used to actually send data over the channel's socket. Now we
 * queue a request and send a single-byte notification over the shared
 * POLLMGR_QUEUE socket.
 */
ssize_t
pollmgr_chan_send(int slot, void *buf, size_t nbytes)
{
    static const char notification = 0x5a;

    void *ptr;
    SOCKET fd;
    ssize_t nsent;
    int rc;

    AssertReturn(0 <= slot && slot < POLLMGR_CHAN_COUNT, -1);

    /*
     * XXX: Hack alert. We only ever "sent" a single pointer, which
     * was simultaneously both the wakeup event for the poll and the
     * argument that the channel handler read from the channel. So now
     * we pass this pointer to the request and arrange for the handler
     * to "read" it when it asks for it.
     */
    if (nbytes != sizeof(void *)) {
        return -1;
    }

    ptr = *(void **)buf;

    rc = RTReqQueueCallEx(pollmgr.queue, NULL, 0,
                          RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
                          (PFNRT)pollmgr_chan_call_handler, 2,
                          slot, ptr);

    fd = pollmgr.chan[POLLMGR_QUEUE][POLLMGR_CHFD_WR];
    nsent = send(fd, &notification, 1, 0);
    if (nsent == SOCKET_ERROR) {
        DPRINTF(("send on chan %d: %R[sockerr]\n", slot, SOCKERRNO()));
        return -1;
    }
    else if ((size_t)nsent != 1) {
        DPRINTF(("send on chan %d: datagram truncated to %u bytes",
                 slot, (unsigned int)nsent));
        return -1;
    }

    /* caller thinks it's sending the pointer */
    return sizeof(void *);
}


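/*
 * A hedged usage sketch of the channel API (all "mychan"/"my_object"
 * names are hypothetical, and MYCHAN_SLOT stands for one of the
 * channel slots counted by POLLMGR_CHAN_COUNT): a channel is
 * registered once before the poll manager thread starts, and other
 * threads later wake the poll loop by "sending" a single pointer over
 * it.  The matching receive side is sketched after
 * pollmgr_chan_recv_ptr() below.
 */
#if 0
struct my_object;               /* hypothetical payload type */

static int mychan_callback(struct pollmgr_handler *, SOCKET, int);
static struct pollmgr_handler mychan_handler;

static void
mychan_setup(void)
{
    mychan_handler.callback = mychan_callback;
    mychan_handler.data = NULL;
    /* all channels share one notification socket; the result may be ignored */
    (void) pollmgr_add_chan(MYCHAN_SLOT, &mychan_handler);
}

static void
mychan_notify(struct my_object *obj)
{
    /* exactly sizeof(void *) bytes must be "sent" */
    pollmgr_chan_send(MYCHAN_SLOT, &obj, sizeof(obj));
}
#endif

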
/*
 * pollmgr_chan_send() sent us a notification, process the queue.
 */
static int
pollmgr_queue_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    ssize_t nread;
    int sockerr;
    int rc;

    RT_NOREF(handler, revents);
    Assert(pollmgr.queue != NIL_RTREQQUEUE);

    nread = recv(fd, (char *)pollmgr_udpbuf, sizeof(pollmgr_udpbuf), 0);
    sockerr = SOCKERRNO(); /* save now, may be clobbered */

    if (nread == SOCKET_ERROR) {
        DPRINTF0(("%s: recv: %R[sockerr]\n", __func__, sockerr));
        return POLLIN;
    }

    DPRINTF2(("%s: read %zd\n", __func__, nread));
    if (nread == 0) {
        return POLLIN;
    }

    rc = RTReqQueueProcess(pollmgr.queue, 0);
    if (RT_UNLIKELY(rc != VERR_TIMEOUT && RT_FAILURE_NP(rc))) {
        DPRINTF0(("%s: RTReqQueueProcess: %Rrc\n", __func__, rc));
    }

    return POLLIN;
}


/*
 * Queued requests use this function to emulate the call to the
 * handler's callback.
 */
static void
pollmgr_chan_call_handler(int slot, void *arg)
{
    struct pollmgr_handler *handler;
    int nevents;

    AssertReturnVoid(0 <= slot && slot < POLLMGR_CHAN_COUNT);

    handler = pollmgr.chan_handlers[slot].handler;
    AssertReturnVoid(handler != NULL && handler->callback != NULL);

    /* arrange for pollmgr_chan_recv_ptr() to "receive" the arg */
    pollmgr.chan_handlers[slot].arg = arg;
    pollmgr.chan_handlers[slot].arg_valid = true;

    nevents = handler->callback(handler, INVALID_SOCKET, POLLIN);
    if (nevents != POLLIN) {
        DPRINTF2(("%s: nevents=0x%x!\n", __func__, nevents));
    }
}


/*
 * "Receive" a pointer "sent" over poll manager channel.
 */
void *
pollmgr_chan_recv_ptr(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    int slot;
    void *ptr;

    RT_NOREF(fd);

    slot = handler->slot;
    Assert(0 <= slot && slot < POLLMGR_CHAN_COUNT);

    if (revents & POLLNVAL) {
        errx(EXIT_FAILURE, "chan %d: fd invalid", (int)handler->slot);
        /* NOTREACHED */
    }

    if (revents & (POLLERR | POLLHUP)) {
        errx(EXIT_FAILURE, "chan %d: fd error", (int)handler->slot);
        /* NOTREACHED */
    }

    LWIP_ASSERT1(revents & POLLIN);

    if (!pollmgr.chan_handlers[slot].arg_valid) {
        err(EXIT_FAILURE, "chan %d: recv", (int)handler->slot);
        /* NOTREACHED */
    }

    ptr = pollmgr.chan_handlers[slot].arg;
    pollmgr.chan_handlers[slot].arg_valid = false;

    return ptr;
}


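/*
 * The matching receive side of the earlier "mychan" sketch
 * (hypothetical names): the channel callback runs on the poll manager
 * thread, retrieves the pointer queued by pollmgr_chan_send(), and
 * returns POLLIN to keep the shared channel socket armed.
 */
#if 0
static int
mychan_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    struct my_object *obj;

    obj = (struct my_object *)pollmgr_chan_recv_ptr(handler, fd, revents);
    if (obj == NULL) {
        return POLLIN;          /* sender passed NULL; nothing to do */
    }

    /* ... act on obj, e.g. register its socket with pollmgr_add() ... */

    return POLLIN;
}
#endif

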
/*
 * Must be called from pollmgr loop (via callbacks), so no locking.
 */
int
pollmgr_add(struct pollmgr_handler *handler, SOCKET fd, int events)
{
    int slot;

    DPRINTF2(("%s: new fd %d\n", __func__, fd));

    if (pollmgr.nfds == pollmgr.capacity) {
        struct pollfd *newfds;
        struct pollmgr_handler **newhdls;
        nfds_t newcap;
        nfds_t i;

        newcap = pollmgr.capacity * 2;

        newfds = (struct pollfd *)
            realloc(pollmgr.fds, newcap * sizeof(*pollmgr.fds));
        if (newfds == NULL) {
            DPRINTF(("%s: Failed to reallocate fds array\n", __func__));
            handler->slot = -1;
            return -1;
        }

        pollmgr.fds = newfds; /* don't crash/leak if realloc(handlers) fails */
        /* but don't update capacity yet! */

        newhdls = (struct pollmgr_handler **)
            realloc(pollmgr.handlers, newcap * sizeof(*pollmgr.handlers));
        if (newhdls == NULL) {
            DPRINTF(("%s: Failed to reallocate handlers array\n", __func__));
            /* if we failed to realloc here, then fds points to the
             * new array, but we pretend we still have the old capacity */
            handler->slot = -1;
            return -1;
        }

        pollmgr.handlers = newhdls;
        pollmgr.capacity = newcap;

        for (i = pollmgr.nfds; i < newcap; ++i) {
            newfds[i].fd = INVALID_SOCKET;
            newfds[i].events = 0;
            newfds[i].revents = 0;
            newhdls[i] = NULL;
        }
    }

    slot = pollmgr.nfds;
    ++pollmgr.nfds;

    pollmgr_add_at(slot, handler, fd, events);
    return slot;
}


static void
pollmgr_add_at(int slot, struct pollmgr_handler *handler, SOCKET fd, int events)
{
    pollmgr.fds[slot].fd = fd;
    pollmgr.fds[slot].events = events;
    pollmgr.fds[slot].revents = 0;
    pollmgr.handlers[slot] = handler;

    handler->slot = slot;
}


void
pollmgr_update_events(int slot, int events)
{
    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
    LWIP_ASSERT1((nfds_t)slot < pollmgr.nfds);

    pollmgr.fds[slot].events = events;
}


void
pollmgr_del_slot(int slot)
{
    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);

    DPRINTF2(("%s(%d): fd %d ! DELETED\n",
              __func__, slot, pollmgr.fds[slot].fd));

    pollmgr.fds[slot].fd = INVALID_SOCKET; /* see poll loop */
}


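/*
 * A sketch of how a dynamic slot is typically used (the "conn" names
 * are hypothetical): a handler is registered with pollmgr_add(), its
 * callback returns the event mask to poll for next, and returning -1
 * (or calling pollmgr_del_slot() from another callback) marks the
 * slot for the garbage collection pass in pollmgr_loop() below.
 */
#if 0
struct conn {
    struct pollmgr_handler handler;
    SOCKET sock;
    /* ... */
};

static int
conn_callback(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    struct conn *c = (struct conn *)handler->data;

    if (revents & (POLLNVAL | POLLERR | POLLHUP)) {
        /* ... destroy c ... */
        return -1;              /* delete this slot */
    }

    if (revents & POLLIN) {
        /* ... read from fd ... */
    }

    return POLLIN;              /* events to poll for next time */
}

static int
conn_register(struct conn *c)
{
    c->handler.callback = conn_callback;
    c->handler.data = c;
    return pollmgr_add(&c->handler, c->sock, POLLIN); /* -1 on failure */
}
#endif

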
void
pollmgr_thread(void *ignored)
{
    LWIP_UNUSED_ARG(ignored);
    pollmgr_loop();
}


static void
pollmgr_loop(void)
{
    int nready;
    SOCKET delfirst;
    SOCKET *pdelprev;
    int i;

    for (;;) {
#ifndef RT_OS_WINDOWS
        nready = poll(pollmgr.fds, pollmgr.nfds, -1);
#else
        int rc = RTWinPoll(pollmgr.fds, pollmgr.nfds, RT_INDEFINITE_WAIT, &nready);
        if (RT_FAILURE(rc)) {
            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED */
        }
#endif

        DPRINTF2(("%s: ready %d fd%s\n",
                  __func__, nready, (nready == 1 ? "" : "s")));

        if (nready < 0) {
            if (errno == EINTR) {
                continue;
            }

            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED */
        }
        else if (nready == 0) { /* cannot happen, we wait forever (-1) */
            continue; /* - but be defensive */
        }


        delfirst = INVALID_SOCKET;
        pdelprev = &delfirst;

        for (i = 0; (nfds_t)i < pollmgr.nfds && nready > 0; ++i) {
            struct pollmgr_handler *handler;
            SOCKET fd;
            int revents, nevents;

            fd = pollmgr.fds[i].fd;
            revents = pollmgr.fds[i].revents;

            /*
             * Channel handlers can request deletion of dynamic slots
             * by calling pollmgr_del_slot() that clobbers slot's fd.
             */
            if (fd == INVALID_SOCKET && i >= POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* adjust count if events were pending for that slot */
                if (revents != 0) {
                    --nready;
                }

                /* pretend that slot handler requested deletion */
                nevents = -1;
                goto update_events;
            }

            if (revents == 0) {
                continue; /* next fd */
            }
            --nready;

            handler = pollmgr.handlers[i];

            if (handler != NULL && handler->callback != NULL) {
#ifdef LWIP_PROXY_DEBUG
# if LWIP_PROXY_DEBUG /* DEBUG */
                if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                    if (revents == POLLIN) {
                        DPRINTF2(("%s: ch %d\n", __func__, i));
                    }
                    else {
                        DPRINTF2(("%s: ch %d @ revents 0x%x!\n",
                                  __func__, i, revents));
                    }
                }
                else {
                    DPRINTF2(("%s: fd %d @ revents 0x%x\n",
                              __func__, fd, revents));
                }
# endif /* LWIP_PROXY_DEBUG / DEBUG */
#endif
                nevents = (*handler->callback)(handler, fd, revents);
            }
            else {
                DPRINTF0(("%s: invalid handler for fd %d: ", __func__, fd));
                if (handler == NULL) {
                    DPRINTF0(("NULL\n"));
                }
                else {
                    DPRINTF0(("%p (callback = NULL)\n", (void *)handler));
                }
                nevents = -1; /* delete it */
            }

          update_events:
            if (nevents >= 0) {
                if (nevents != pollmgr.fds[i].events) {
                    DPRINTF2(("%s: fd %d ! nevents 0x%x\n",
                              __func__, fd, nevents));
                }
                pollmgr.fds[i].events = nevents;
            }
            else if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* Don't garbage-collect channels. */
                DPRINTF2(("%s: fd %d ! DELETED (channel %d)\n",
                          __func__, fd, i));
                pollmgr.fds[i].fd = INVALID_SOCKET;
                pollmgr.fds[i].events = 0;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
            else {
                DPRINTF2(("%s: fd %d ! DELETED\n", __func__, fd));

                /* schedule for deletion (see g/c loop for details) */
                *pdelprev = i; /* make previous entry point to us */
                pdelprev = &pollmgr.fds[i].fd;

                pollmgr.fds[i].fd = INVALID_SOCKET; /* end of list (for now) */
                pollmgr.fds[i].events = POLLMGR_GARBAGE;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
        } /* processing loop */


        /*
         * Garbage collect and compact the array.
         *
         * We overload pollfd::fd of garbage entries to store the
         * index of the next garbage entry. The garbage list is
         * co-directional with the fds array. The index of the first
         * entry is in "delfirst", the last entry "points to"
         * INVALID_SOCKET.
         *
         * See update_events code for nevents < 0 at the end of the
         * processing loop above.
         */
        while (delfirst != INVALID_SOCKET) {
            const int last = pollmgr.nfds - 1;

            /*
             * We want a live entry in the last slot to swap into the
             * freed slot, so make sure we have one.
             */
            if (pollmgr.fds[last].events == POLLMGR_GARBAGE /* garbage */
                || pollmgr.fds[last].fd == INVALID_SOCKET) /* or killed */
            {
                /* drop garbage entry at the end of the array */
                --pollmgr.nfds;

                if (delfirst == (SOCKET)last) {
                    /* congruent to delnext >= pollmgr.nfds test below */
                    delfirst = INVALID_SOCKET; /* done */
                }
            }
            else {
                const SOCKET delnext = pollmgr.fds[delfirst].fd;

                /* copy live entry at the end to the first slot being freed */
                pollmgr.fds[delfirst] = pollmgr.fds[last]; /* struct copy */
                pollmgr.handlers[delfirst] = pollmgr.handlers[last];
                pollmgr.handlers[delfirst]->slot = (int)delfirst;
                --pollmgr.nfds;

                if ((nfds_t)delnext >= pollmgr.nfds) {
                    delfirst = INVALID_SOCKET; /* done */
                }
                else {
                    delfirst = delnext;
                }
            }

            pollmgr.fds[last].fd = INVALID_SOCKET;
            pollmgr.fds[last].events = 0;
            pollmgr.fds[last].revents = 0;
            pollmgr.handlers[last] = NULL;
        }
    } /* poll loop */
}


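/*
 * A worked example of the garbage list used in pollmgr_loop() above:
 * if the callbacks for dynamic slots 3 and 5 both request deletion in
 * one pass, then after the processing loop delfirst == 3,
 * fds[3].fd == 5, fds[5].fd == INVALID_SOCKET, and both slots have
 * events == POLLMGR_GARBAGE. The g/c loop then walks that chain and
 * compacts the array by moving live tail entries into the freed slots.
 */

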
/**
 * Create strongly held refptr.
 */
struct pollmgr_refptr *
pollmgr_refptr_create(struct pollmgr_handler *ptr)
{
    struct pollmgr_refptr *rp;

    LWIP_ASSERT1(ptr != NULL);

    rp = (struct pollmgr_refptr *)malloc(sizeof (*rp));
    if (rp == NULL) {
        return NULL;
    }

    sys_mutex_new(&rp->lock);
    rp->ptr = ptr;
    rp->strong = 1;
    rp->weak = 0;

    return rp;
}


static void
pollmgr_refptr_delete(struct pollmgr_refptr *rp)
{
    if (rp == NULL) {
        return;
    }

    LWIP_ASSERT1(rp->strong == 0);
    LWIP_ASSERT1(rp->weak == 0);

    sys_mutex_free(&rp->lock);
    free(rp);
}


/**
 * Add weak reference before "rp" is sent over a poll manager channel.
 */
void
pollmgr_refptr_weak_ref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->ptr != NULL);
    LWIP_ASSERT1(rp->strong > 0);

    ++rp->weak;

    sys_mutex_unlock(&rp->lock);
}


/**
 * Try to get the pointer from the implicitly weak reference we've got
 * from a channel.
 *
 * If we detect that the object is still strongly referenced, but no
 * longer registered with the poll manager, we abort the strengthening
 * conversion here because the lwip thread callback is already
 * scheduled to destruct the object.
 */
741struct pollmgr_handler *
742pollmgr_refptr_get(struct pollmgr_refptr *rp)
743{
744 struct pollmgr_handler *handler;
745 size_t weak;
746
747 sys_mutex_lock(&rp->lock);
748
749 LWIP_ASSERT1(rp->weak > 0);
750 weak = --rp->weak;
751
752 handler = rp->ptr;
753 if (handler == NULL) {
754 LWIP_ASSERT1(rp->strong == 0);
755 sys_mutex_unlock(&rp->lock);
756 if (weak == 0) {
757 pollmgr_refptr_delete(rp);
758 }
759 return NULL;
760 }
761
762 LWIP_ASSERT1(rp->strong == 1);
763
    /*
     * Here we would do:
     *
     * ++rp->strong;
     *
     * and then, after the channel handler is done, we would decrement
     * it back.
     *
     * Instead we check that the object is still registered with the
     * poll manager. If it is, there's no race with the lwip thread
     * trying to drop its strong reference, as the lwip thread callback
     * to destruct the object is always scheduled by its poll manager
     * callback.
     *
     * Conversely, if we detect that the object is no longer registered
     * with the poll manager, we abort immediately, since the channel
     * handler can't do anything useful anyway and would have to return
     * right away.
     *
     * Since the channel handler would always find rp->strong as it had
     * left it, just elide the extra strong reference creation to avoid
     * the whole back-and-forth.
     */

    if (handler->slot < 0) { /* no longer polling */
        sys_mutex_unlock(&rp->lock);
        return NULL;
    }

    sys_mutex_unlock(&rp->lock);
    return handler;
}


/**
 * Remove (the only) strong reference.
 *
 * If these were real strong/weak pointers, we would also call the
 * destructor for the referenced object here; instead the object is
 * destroyed separately, by the scheduled lwip thread callback.
 */
void
pollmgr_refptr_unref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->strong == 1);
    --rp->strong;

    if (rp->strong > 0) {
        sys_mutex_unlock(&rp->lock);
    }
    else {
        size_t weak;

        /* void *ptr = rp->ptr; */
        rp->ptr = NULL;

        /* delete ptr; // see doc comment */

        weak = rp->weak;
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
    }
}
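

/*
 * A hedged sketch of the refptr life cycle around the channel API
 * (hypothetical "owner_handler"/MYCHAN_SLOT names; the steps are
 * normally spread across different threads): the owner holds the
 * single strong reference, a weak reference is taken for every
 * pointer handed over a channel, and the receiver converts it back
 * with pollmgr_refptr_get().
 */
#if 0
static void
refptr_lifecycle_sketch(struct pollmgr_handler *owner_handler)
{
    struct pollmgr_refptr *rp;
    struct pollmgr_handler *h;

    /* owner: create the strongly held refptr for its handler */
    rp = pollmgr_refptr_create(owner_handler);

    /* sender: take a weak reference for the pointer sent over a channel */
    pollmgr_refptr_weak_ref(rp);
    pollmgr_chan_send(MYCHAN_SLOT, &rp, sizeof(rp));

    /* receiver (channel callback): try to strengthen the weak reference */
    h = pollmgr_refptr_get(rp);
    if (h == NULL) {
        return;                 /* object gone or no longer polling */
    }

    /* owner, when the object is unregistered and destroyed */
    pollmgr_refptr_unref(rp);
}
#endif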