VirtualBox

source: vbox/trunk/src/VBox/NetworkServices/NAT/pxtcp.c@58541

Last change on this file since 58541 was 58541, checked in by vboxsync, 9 years ago

NAT/Net: fix typo in comment

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 67.3 KB
1/* $Id: pxtcp.c 58541 2015-11-02 14:35:14Z vboxsync $ */
2/** @file
3 * NAT Network - TCP proxy.
4 */
5
6/*
7 * Copyright (C) 2013-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#define LOG_GROUP LOG_GROUP_NAT_SERVICE
19
20#include "winutils.h"
21
22#include "pxtcp.h"
23
24#include "proxy.h"
25#include "proxy_pollmgr.h"
26#include "pxremap.h"
27#include "portfwd.h" /* fwspec */
28
29#ifndef RT_OS_WINDOWS
30#include <sys/types.h>
31#include <sys/socket.h>
32#include <sys/ioctl.h>
33#ifdef RT_OS_SOLARIS
34#include <sys/filio.h> /* FIONREAD is BSD'ism */
35#endif
36#include <stdlib.h>
37#include <stdint.h>
38#include <stdio.h>
39#include <string.h>
40#include <poll.h>
41
42#include <err.h> /* BSD'ism */
43#else
44#include <stdlib.h>
45#include <stdio.h>
46#include <string.h>
47
48#include <iprt/stdint.h>
49#include "winpoll.h"
50#endif
51
52#include "lwip/opt.h"
53
54#include "lwip/sys.h"
55#include "lwip/tcpip.h"
56#include "lwip/netif.h"
57#include "lwip/tcp_impl.h" /* XXX: to access tcp_abandon() */
58#include "lwip/icmp.h"
59#include "lwip/icmp6.h"
60
61/*
62 * Different OSes have different quirks in reporting POLLHUP for TCP
63 * sockets.
64 *
65 * Using shutdown(2) "how" values here would be more readable, but
66 * since SHUT_RD is 0, we can't use 0 for "none", unfortunately.
67 */
68#if defined(RT_OS_NETBSD) || defined(RT_OS_SOLARIS)
69# define HAVE_TCP_POLLHUP 0 /* not reported */
70#elif defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
71# define HAVE_TCP_POLLHUP POLLIN /* reported when remote closes */
72#else
73# define HAVE_TCP_POLLHUP (POLLIN|POLLOUT) /* reported when both directions are closed */
74#endif
75
76
77/**
78 * Ring buffer for inbound data. Filled with data from the host
79 * socket on poll manager thread. Data consumed by scheduling
80 * tcp_write() to the pcb on the lwip thread.
81 *
82 * NB: There is actually a third party present, the lwIP stack itself.
83 * Thus the buffer doesn't have the usual dual free vs. data split, but
84 * rather a three-way free / sent-but-unACKed data / unsent data split.
85 */
86struct ringbuf {
87 char *buf;
88 size_t bufsize;
89
90 /*
91 * Start of free space, producer writes here (up till "unacked").
92 */
93 volatile size_t vacant;
94
95 /*
96 * Start of sent but unacknowledged data. The data are "owned" by
97 * the stack as it may need to retransmit. This is the free space
98 * limit for producer.
99 */
100 volatile size_t unacked;
101
102 /*
103 * Start of unsent data, consumer reads/sends from here (up till
104 * "vacant"). Not declared volatile since it's only accessed from
105 * the consumer thread.
106 */
107 size_t unsent;
108};
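
/*
 * For illustration, a hypothetical helper (not used by the proxy) that
 * computes how much the producer may still write.  It assumes the same
 * convention as pxtcp_sock_read() below: one slot is always left empty,
 * so vacant == unacked unambiguously means "buffer empty".
 */
#if 0 /* illustrative sketch */
static size_t
ringbuf_producer_space(const struct ringbuf *rb)
{
    size_t vacant = rb->vacant;     /* next slot the producer would fill */
    size_t unacked = rb->unacked;   /* oldest byte still owned by the stack */

    if (unacked > vacant) {
        return unacked - vacant - 1;            /* free space is contiguous */
    }
    return rb->bufsize - vacant + unacked - 1;  /* free space wraps around */
}
#endif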
109
110
111/**
112 * Proxied TCP connection: lwIP pcb (guest side) + host socket (external side). */
113struct pxtcp {
114 /**
115 * Our poll manager handler. Must be first, strong/weak
116 * references depend on this "inheritance".
117 */
118 struct pollmgr_handler pmhdl;
119
120 /**
121 * lwIP (internal/guest) side of the proxied connection.
122 */
123 struct tcp_pcb *pcb;
124
125 /**
126 * Host (external) side of the proxied connection.
127 */
128 SOCKET sock;
129
130 /**
131 * Socket events we are currently polling for.
132 */
133 int events;
134
135 /**
136 * Socket error. Currently used to save connect(2) errors so that
137 * we can decide whether we need to send an ICMP error.
138 */
139 int sockerr;
140
141 /**
142 * Interface that we got the SYN from. Needed to send ICMP
143 * with the correct source address.
144 */
145 struct netif *netif;
146
147 /**
148 * For tentatively accepted connections, while we are still in the
149 * process of connecting to the real destination, this is the
150 * initial pbuf that we might need to build an ICMP error.
151 *
152 * When the connection is established this is used to hold an outbound
153 * pbuf chain received by pxtcp_pcb_recv() but not yet completely
154 * forwarded over the socket. We cannot "return" it to lwIP since
155 * the head of the chain is already sent and freed.
156 */
157 struct pbuf *unsent;
158
159 /**
160 * Guest has closed its side. Reported to pxtcp_pcb_recv() only
161 * once and we might not be able to forward it immediately if we
162 * have unsent pbuf.
163 */
164 int outbound_close;
165
166 /**
167 * Outbound half-close has been done on the socket.
168 */
169 int outbound_close_done;
170
171 /**
172 * External has closed its side. We might not be able to forward
173 * it immediately if we have unforwarded data.
174 */
175 int inbound_close;
176
177 /**
178 * Inbound half-close has been done on the pcb.
179 */
180 int inbound_close_done;
181
182 /**
183 * On systems that report POLLHUP as soon as the final FIN is
184 * received on a socket we cannot continue polling for the rest of
185 * input, so we have to read (pull) last data from the socket on
186 * the lwIP thread instead of polling/pushing it from the poll
187 * manager thread. See comment in pxtcp_pmgr_pump() POLLHUP case.
188 */
189 int inbound_pull;
190
191
192 /**
193 * When poll manager schedules delete we may not be able to delete
194 * a pxtcp immediately if not all inbound data has been acked by
195 * the guest: lwIP may need to resend and the data are in pxtcp's
197 * inbuf::buf. We defer the delete until pxtcp_pcb_sent() reports
198 * that all data have been acked.
198 */
199 int deferred_delete;
200
201 /**
202 * Ring-buffer for inbound data.
203 */
204 struct ringbuf inbuf;
205
206 /**
207 * lwIP thread's strong reference to us.
208 */
209 struct pollmgr_refptr *rp;
210
211
212 /*
213 * We use static messages to call functions on the lwIP thread to
214 * void malloc/free overhead.
215 */
216 struct tcpip_msg msg_delete; /* delete pxtcp */
217 struct tcpip_msg msg_reset; /* reset connection and delete pxtcp */
218 struct tcpip_msg msg_accept; /* confirm accept of proxied connection */
219 struct tcpip_msg msg_outbound; /* trigger send of outbound data */
220 struct tcpip_msg msg_inbound; /* trigger send of inbound data */
221 struct tcpip_msg msg_inpull; /* trigger pull of last inbound data */
222};
223
224
225
226static struct pxtcp *pxtcp_allocate(void);
227static void pxtcp_free(struct pxtcp *);
228
229static void pxtcp_pcb_associate(struct pxtcp *, struct tcp_pcb *);
230static void pxtcp_pcb_dissociate(struct pxtcp *);
231
232/* poll manager callbacks for pxtcp related channels */
233static int pxtcp_pmgr_chan_add(struct pollmgr_handler *, SOCKET, int);
234static int pxtcp_pmgr_chan_pollout(struct pollmgr_handler *, SOCKET, int);
235static int pxtcp_pmgr_chan_pollin(struct pollmgr_handler *, SOCKET, int);
236#if !(HAVE_TCP_POLLHUP & POLLOUT)
237static int pxtcp_pmgr_chan_del(struct pollmgr_handler *, SOCKET, int);
238#endif
239static int pxtcp_pmgr_chan_reset(struct pollmgr_handler *, SOCKET, int);
240
241/* helper functions for sending/receiving pxtcp over poll manager channels */
242static ssize_t pxtcp_chan_send(enum pollmgr_slot_t, struct pxtcp *);
243static ssize_t pxtcp_chan_send_weak(enum pollmgr_slot_t, struct pxtcp *);
244static struct pxtcp *pxtcp_chan_recv(struct pollmgr_handler *, SOCKET, int);
245static struct pxtcp *pxtcp_chan_recv_strong(struct pollmgr_handler *, SOCKET, int);
246
247/* poll manager callbacks for individual sockets */
248static int pxtcp_pmgr_connect(struct pollmgr_handler *, SOCKET, int);
249static int pxtcp_pmgr_pump(struct pollmgr_handler *, SOCKET, int);
250
251/* get incoming traffic into ring buffer */
252static ssize_t pxtcp_sock_read(struct pxtcp *, int *);
253static ssize_t pxtcp_sock_recv(struct pxtcp *, IOVEC *, size_t); /* default */
254
255/* convenience functions for poll manager callbacks */
256static int pxtcp_schedule_delete(struct pxtcp *);
257static int pxtcp_schedule_reset(struct pxtcp *);
258static int pxtcp_schedule_reject(struct pxtcp *);
259
260/* lwip thread callbacks called via proxy_lwip_post() */
261static void pxtcp_pcb_delete_pxtcp(void *);
262static void pxtcp_pcb_reset_pxtcp(void *);
263static void pxtcp_pcb_accept_refuse(void *);
264static void pxtcp_pcb_accept_confirm(void *);
265static void pxtcp_pcb_write_outbound(void *);
266static void pxtcp_pcb_write_inbound(void *);
267static void pxtcp_pcb_pull_inbound(void *);
268
269/* tcp pcb callbacks */
270static err_t pxtcp_pcb_heard(void *, struct tcp_pcb *, err_t); /* global */
271static err_t pxtcp_pcb_accept(void *, struct tcp_pcb *, err_t);
272static err_t pxtcp_pcb_connected(void *, struct tcp_pcb *, err_t);
273static err_t pxtcp_pcb_recv(void *, struct tcp_pcb *, struct pbuf *, err_t);
274static err_t pxtcp_pcb_sent(void *, struct tcp_pcb *, u16_t);
275static err_t pxtcp_pcb_poll(void *, struct tcp_pcb *);
276static void pxtcp_pcb_err(void *, err_t);
277
278static err_t pxtcp_pcb_forward_outbound(struct pxtcp *, struct pbuf *);
279static void pxtcp_pcb_forward_outbound_close(struct pxtcp *);
280
281static ssize_t pxtcp_sock_send(struct pxtcp *, IOVEC *, size_t);
282
283static void pxtcp_pcb_forward_inbound(struct pxtcp *);
284static void pxtcp_pcb_forward_inbound_close(struct pxtcp *);
285DECLINLINE(int) pxtcp_pcb_forward_inbound_done(const struct pxtcp *);
286static void pxtcp_pcb_schedule_poll(struct pxtcp *);
287static void pxtcp_pcb_cancel_poll(struct pxtcp *);
288
289static void pxtcp_pcb_reject(struct tcp_pcb *, int, struct netif *, struct pbuf *);
290DECLINLINE(void) pxtcp_pcb_maybe_deferred_delete(struct pxtcp *);
291
292/* poll manager handlers for pxtcp channels */
293static struct pollmgr_handler pxtcp_pmgr_chan_add_hdl;
294static struct pollmgr_handler pxtcp_pmgr_chan_pollout_hdl;
295static struct pollmgr_handler pxtcp_pmgr_chan_pollin_hdl;
296#if !(HAVE_TCP_POLLHUP & POLLOUT)
297static struct pollmgr_handler pxtcp_pmgr_chan_del_hdl;
298#endif
299static struct pollmgr_handler pxtcp_pmgr_chan_reset_hdl;
300
301
302/**
303 * Init PXTCP - must be run before either the lwIP tcpip thread or the
304 * poll manager threads have been created.
305 */
306void
307pxtcp_init(void)
308{
309 /*
310 * Create channels.
311 */
312#define CHANNEL(SLOT, NAME) do { \
313 NAME##_hdl.callback = NAME; \
314 NAME##_hdl.data = NULL; \
315 NAME##_hdl.slot = -1; \
316 pollmgr_add_chan(SLOT, &NAME##_hdl); \
317 } while (0)
318
319 CHANNEL(POLLMGR_CHAN_PXTCP_ADD, pxtcp_pmgr_chan_add);
320 CHANNEL(POLLMGR_CHAN_PXTCP_POLLIN, pxtcp_pmgr_chan_pollin);
321 CHANNEL(POLLMGR_CHAN_PXTCP_POLLOUT, pxtcp_pmgr_chan_pollout);
322#if !(HAVE_TCP_POLLHUP & POLLOUT)
323 CHANNEL(POLLMGR_CHAN_PXTCP_DEL, pxtcp_pmgr_chan_del);
324#endif
325 CHANNEL(POLLMGR_CHAN_PXTCP_RESET, pxtcp_pmgr_chan_reset);
326
327#undef CHANNEL
328
329 /*
330 * Listen for outgoing connections from guest(s).
331 */
332 tcp_proxy_accept(pxtcp_pcb_heard);
333}
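
/*
 * For illustration, the first CHANNEL() invocation above expands to
 * roughly the following (the ## paste forms the handler name):
 *
 *   pxtcp_pmgr_chan_add_hdl.callback = pxtcp_pmgr_chan_add;
 *   pxtcp_pmgr_chan_add_hdl.data = NULL;
 *   pxtcp_pmgr_chan_add_hdl.slot = -1;
 *   pollmgr_add_chan(POLLMGR_CHAN_PXTCP_ADD, &pxtcp_pmgr_chan_add_hdl);
 */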
334
335
336/**
337 * Syntactic sugar for sending pxtcp pointer over poll manager
338 * channel. Used by lwip thread functions.
339 */
340static ssize_t
341pxtcp_chan_send(enum pollmgr_slot_t slot, struct pxtcp *pxtcp)
342{
343 return pollmgr_chan_send(slot, &pxtcp, sizeof(pxtcp));
344}
345
346
347/**
348 * Syntactic sugar for sending weak reference to pxtcp over poll
349 * manager channel. Used by lwip thread functions.
350 */
351static ssize_t
352pxtcp_chan_send_weak(enum pollmgr_slot_t slot, struct pxtcp *pxtcp)
353{
354 pollmgr_refptr_weak_ref(pxtcp->rp);
355 return pollmgr_chan_send(slot, &pxtcp->rp, sizeof(pxtcp->rp));
356}
357
358
359/**
360 * Counterpart of pxtcp_chan_send().
361 */
362static struct pxtcp *
363pxtcp_chan_recv(struct pollmgr_handler *handler, SOCKET fd, int revents)
364{
365 struct pxtcp *pxtcp;
366
367 pxtcp = (struct pxtcp *)pollmgr_chan_recv_ptr(handler, fd, revents);
368 return pxtcp;
369}
370
371
372/**
373 * Counterpart of pxtcp_chan_send_weak().
374 */
375static struct pxtcp *
376pxtcp_chan_recv_strong(struct pollmgr_handler *handler, SOCKET fd, int revents)
377{
378 struct pollmgr_refptr *rp;
379 struct pollmgr_handler *base;
380 struct pxtcp *pxtcp;
381
382 rp = (struct pollmgr_refptr *)pollmgr_chan_recv_ptr(handler, fd, revents);
383 base = (struct pollmgr_handler *)pollmgr_refptr_get(rp);
384 pxtcp = (struct pxtcp *)base;
385
386 return pxtcp;
387}
388
389
390/**
391 * Register pxtcp with poll manager.
392 *
393 * Used for POLLMGR_CHAN_PXTCP_ADD and by port-forwarding. Since
394 * error handling is different in these two cases, we leave it up to
395 * the caller.
396 */
397int
398pxtcp_pmgr_add(struct pxtcp *pxtcp)
399{
400 int status;
401
402 LWIP_ASSERT1(pxtcp != NULL);
403 LWIP_ASSERT1(pxtcp->sock >= 0);
404 LWIP_ASSERT1(pxtcp->pmhdl.callback != NULL);
405 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
406 LWIP_ASSERT1(pxtcp->pmhdl.slot < 0);
407
408 status = pollmgr_add(&pxtcp->pmhdl, pxtcp->sock, pxtcp->events);
409 return status;
410}
411
412
413/**
414 * Unregister pxtcp with poll manager.
415 *
416 * Used for POLLMGR_CHAN_PXTCP_RESET and by port-forwarding (on error
417 * leg).
418 */
419void
420pxtcp_pmgr_del(struct pxtcp *pxtcp)
421{
422 LWIP_ASSERT1(pxtcp != NULL);
423
424 pollmgr_del_slot(pxtcp->pmhdl.slot);
425}
426
427
428/**
429 * POLLMGR_CHAN_PXTCP_ADD handler.
430 *
431 * Get new pxtcp from lwip thread and start polling its socket.
432 */
433static int
434pxtcp_pmgr_chan_add(struct pollmgr_handler *handler, SOCKET fd, int revents)
435{
436 struct pxtcp *pxtcp;
437 int status;
438
439 pxtcp = pxtcp_chan_recv(handler, fd, revents);
440 DPRINTF0(("pxtcp_add: new pxtcp %p; pcb %p; sock %d\n",
441 (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
442
443 status = pxtcp_pmgr_add(pxtcp);
444 if (status < 0) {
445 (void) pxtcp_schedule_reset(pxtcp);
446 }
447
448 return POLLIN;
449}
450
451
452/**
453 * POLLMGR_CHAN_PXTCP_POLLOUT handler.
454 *
455 * pxtcp_pcb_forward_outbound() on the lwIP thread tried to send data
456 * and failed; it now requests us to poll the socket for POLLOUT and to
457 * reschedule pxtcp_pcb_forward_outbound() when the socket is writable again.
458 */
459static int
460pxtcp_pmgr_chan_pollout(struct pollmgr_handler *handler, SOCKET fd, int revents)
461{
462 struct pxtcp *pxtcp;
463
464 pxtcp = pxtcp_chan_recv_strong(handler, fd, revents);
465 DPRINTF0(("pxtcp_pollout: pxtcp %p\n", (void *)pxtcp));
466
467 if (pxtcp == NULL) {
468 return POLLIN;
469 }
470
471 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
472 LWIP_ASSERT1(pxtcp->pmhdl.slot > 0);
473
474 pxtcp->events |= POLLOUT;
475 pollmgr_update_events(pxtcp->pmhdl.slot, pxtcp->events);
476
477 return POLLIN;
478}
479
480
481/**
482 * POLLMGR_CHAN_PXTCP_POLLIN handler.
483 */
484static int
485pxtcp_pmgr_chan_pollin(struct pollmgr_handler *handler, SOCKET fd, int revents)
486{
487 struct pxtcp *pxtcp;
488
489 pxtcp = pxtcp_chan_recv_strong(handler, fd, revents);
490 DPRINTF2(("pxtcp_pollin: pxtcp %p\n", (void *)pxtcp));
491
492 if (pxtcp == NULL) {
493 return POLLIN;
494 }
495
496 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
497 LWIP_ASSERT1(pxtcp->pmhdl.slot > 0);
498
499 if (pxtcp->inbound_close) {
500 return POLLIN;
501 }
502
503 pxtcp->events |= POLLIN;
504 pollmgr_update_events(pxtcp->pmhdl.slot, pxtcp->events);
505
506 return POLLIN;
507}
508
509
510#if !(HAVE_TCP_POLLHUP & POLLOUT)
511/**
512 * POLLMGR_CHAN_PXTCP_DEL handler.
513 *
514 * Schedule pxtcp deletion. We only need this if the host system doesn't
515 * report POLLHUP for fully closed tcp sockets.
516 */
517static int
518pxtcp_pmgr_chan_del(struct pollmgr_handler *handler, SOCKET fd, int revents)
519{
520 struct pxtcp *pxtcp;
521
522 pxtcp = pxtcp_chan_recv_strong(handler, fd, revents);
523 if (pxtcp == NULL) {
524 return POLLIN;
525 }
526
527 DPRINTF(("PXTCP_DEL: pxtcp %p; pcb %p; sock %d\n",
528 (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
529
530 LWIP_ASSERT1(pxtcp->pmhdl.callback != NULL);
531 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
532
533 LWIP_ASSERT1(pxtcp->inbound_close); /* EOF read */
534 LWIP_ASSERT1(pxtcp->outbound_close_done); /* EOF sent */
535
536 pxtcp_pmgr_del(pxtcp);
537 (void) pxtcp_schedule_delete(pxtcp);
538
539 return POLLIN;
540}
541#endif /* !(HAVE_TCP_POLLHUP & POLLOUT) */
542
543
544/**
545 * POLLMGR_CHAN_PXTCP_RESET handler.
546 *
547 * Close the socket with RST and delete pxtcp.
548 */
549static int
550pxtcp_pmgr_chan_reset(struct pollmgr_handler *handler, SOCKET fd, int revents)
551{
552 struct pxtcp *pxtcp;
553
554 pxtcp = pxtcp_chan_recv_strong(handler, fd, revents);
555 if (pxtcp == NULL) {
556 return POLLIN;
557 }
558
559 DPRINTF0(("PXTCP_RESET: pxtcp %p; pcb %p; sock %d\n",
560 (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
561
562 LWIP_ASSERT1(pxtcp->pmhdl.callback != NULL);
563 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
564
565 pxtcp_pmgr_del(pxtcp);
566
567 proxy_reset_socket(pxtcp->sock);
568 pxtcp->sock = INVALID_SOCKET;
569
570 (void) pxtcp_schedule_reset(pxtcp);
571
572 return POLLIN;
573}
574
575
576static struct pxtcp *
577pxtcp_allocate(void)
578{
579 struct pxtcp *pxtcp;
580
581 pxtcp = (struct pxtcp *)malloc(sizeof(*pxtcp));
582 if (pxtcp == NULL) {
583 return NULL;
584 }
585
586 pxtcp->pmhdl.callback = NULL;
587 pxtcp->pmhdl.data = (void *)pxtcp;
588 pxtcp->pmhdl.slot = -1;
589
590 pxtcp->pcb = NULL;
591 pxtcp->sock = INVALID_SOCKET;
592 pxtcp->events = 0;
593 pxtcp->sockerr = 0;
594 pxtcp->netif = NULL;
595 pxtcp->unsent = NULL;
596 pxtcp->outbound_close = 0;
597 pxtcp->outbound_close_done = 0;
598 pxtcp->inbound_close = 0;
599 pxtcp->inbound_close_done = 0;
600 pxtcp->inbound_pull = 0;
601 pxtcp->deferred_delete = 0;
602
603 pxtcp->inbuf.bufsize = 64 * 1024;
604 pxtcp->inbuf.buf = (char *)malloc(pxtcp->inbuf.bufsize);
605 if (pxtcp->inbuf.buf == NULL) {
606 free(pxtcp);
607 return NULL;
608 }
609 pxtcp->inbuf.vacant = 0;
610 pxtcp->inbuf.unacked = 0;
611 pxtcp->inbuf.unsent = 0;
612
613 pxtcp->rp = pollmgr_refptr_create(&pxtcp->pmhdl);
614 if (pxtcp->rp == NULL) {
615 free(pxtcp->inbuf.buf);
616 free(pxtcp);
617 return NULL;
618 }
619
620#define CALLBACK_MSG(MSG, FUNC) \
621 do { \
622 pxtcp->MSG.type = TCPIP_MSG_CALLBACK_STATIC; \
623 pxtcp->MSG.sem = NULL; \
624 pxtcp->MSG.msg.cb.function = FUNC; \
625 pxtcp->MSG.msg.cb.ctx = (void *)pxtcp; \
626 } while (0)
627
628 CALLBACK_MSG(msg_delete, pxtcp_pcb_delete_pxtcp);
629 CALLBACK_MSG(msg_reset, pxtcp_pcb_reset_pxtcp);
630 CALLBACK_MSG(msg_accept, pxtcp_pcb_accept_confirm);
631 CALLBACK_MSG(msg_outbound, pxtcp_pcb_write_outbound);
632 CALLBACK_MSG(msg_inbound, pxtcp_pcb_write_inbound);
633 CALLBACK_MSG(msg_inpull, pxtcp_pcb_pull_inbound);
634
635#undef CALLBACK_MSG
636
637 return pxtcp;
638}
639
640
641/**
642 * Exported to fwtcp to create pxtcp for incoming port-forwarded
643 * connections. Completed with pcb in pxtcp_pcb_connect().
644 */
645struct pxtcp *
646pxtcp_create_forwarded(SOCKET sock)
647{
648 struct pxtcp *pxtcp;
649
650 pxtcp = pxtcp_allocate();
651 if (pxtcp == NULL) {
652 return NULL;
653 }
654
655 pxtcp->sock = sock;
656 pxtcp->pmhdl.callback = pxtcp_pmgr_pump;
657 pxtcp->events = 0;
658
659 return pxtcp;
660}
661
662
663static void
664pxtcp_pcb_associate(struct pxtcp *pxtcp, struct tcp_pcb *pcb)
665{
666 LWIP_ASSERT1(pxtcp != NULL);
667 LWIP_ASSERT1(pcb != NULL);
668
669 pxtcp->pcb = pcb;
670
671 tcp_arg(pcb, pxtcp);
672
673 tcp_recv(pcb, pxtcp_pcb_recv);
674 tcp_sent(pcb, pxtcp_pcb_sent);
675 tcp_poll(pcb, NULL, 255);
676 tcp_err(pcb, pxtcp_pcb_err);
677}
678
679
680static void
681pxtcp_free(struct pxtcp *pxtcp)
682{
683 if (pxtcp->unsent != NULL) {
684 pbuf_free(pxtcp->unsent);
685 }
686 if (pxtcp->inbuf.buf != NULL) {
687 free(pxtcp->inbuf.buf);
688 }
689 free(pxtcp);
690}
691
692
693/**
694 * Counterpart to pxtcp_create_forwarded(): destroys a pxtcp that
695 * fwtcp failed to register with the poll manager and thus never
696 * posted to the lwip thread to do the connect.
697 */
698void
699pxtcp_cancel_forwarded(struct pxtcp *pxtcp)
700{
701 LWIP_ASSERT1(pxtcp->pcb == NULL);
702 pxtcp_pcb_reset_pxtcp(pxtcp);
703}
704
705
706static void
707pxtcp_pcb_dissociate(struct pxtcp *pxtcp)
708{
709 if (pxtcp == NULL || pxtcp->pcb == NULL) {
710 return;
711 }
712
713 DPRINTF(("%s: pxtcp %p <-> pcb %p\n",
714 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
715
716 /*
717 * We must have dissociated from a fully closed pcb immediately
718 * since lwip recycles them and we don't want to mess with what
719 * would be someone else's pcb that we happen to have a stale
720 * pointer to.
721 */
722 LWIP_ASSERT1(pxtcp->pcb->callback_arg == pxtcp);
723
724 tcp_recv(pxtcp->pcb, NULL);
725 tcp_sent(pxtcp->pcb, NULL);
726 tcp_poll(pxtcp->pcb, NULL, 255);
727 tcp_err(pxtcp->pcb, NULL);
728 tcp_arg(pxtcp->pcb, NULL);
729 pxtcp->pcb = NULL;
730}
731
732
733/**
734 * Lwip thread callback invoked via pxtcp::msg_delete
735 *
736 * Since we use static messages to communicate to the lwip thread, we
737 * cannot delete pxtcp without making sure there are no unprocessed
738 * messages in the lwip thread mailbox.
739 *
740 * The easiest way to ensure that is to send this "delete" message as
741 * the last one and when it's processed we know there are no more and
742 * it's safe to delete pxtcp.
743 *
744 * Poll manager handlers should use pxtcp_schedule_delete()
745 * convenience function.
746 */
747static void
748pxtcp_pcb_delete_pxtcp(void *ctx)
749{
750 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
751
752 DPRINTF(("%s: pxtcp %p, pcb %p, sock %d%s\n",
753 __func__, (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock,
754 (pxtcp->deferred_delete && !pxtcp->inbound_pull
755 ? " (was deferred)" : "")));
756
757 LWIP_ASSERT1(pxtcp != NULL);
758 LWIP_ASSERT1(pxtcp->pmhdl.slot < 0);
759 LWIP_ASSERT1(pxtcp->outbound_close_done);
760 LWIP_ASSERT1(pxtcp->inbound_close); /* not necessarily done */
761
762
763 /*
764 * pxtcp is no longer registered with poll manager, so it's safe
765 * to close the socket.
766 */
767 if (pxtcp->sock != INVALID_SOCKET) {
768 closesocket(pxtcp->sock);
769 pxtcp->sock = INVALID_SOCKET;
770 }
771
772 /*
773 * We might have already dissociated from a fully closed pcb, or
774 * guest might have sent us a reset while msg_delete was in
775 * transit. If there's no pcb, we are done.
776 */
777 if (pxtcp->pcb == NULL) {
778 pollmgr_refptr_unref(pxtcp->rp);
779 pxtcp_free(pxtcp);
780 return;
781 }
782
783 /*
784 * Have we completely forwarded all inbound traffic to the guest?
785 *
786 * We may still be waiting for ACKs. We may have failed to send
787 * some of the data (tcp_write() failed with ERR_MEM). We may
788 * have failed to send the FIN (tcp_shutdown() failed with
789 * ERR_MEM).
790 */
791 if (pxtcp_pcb_forward_inbound_done(pxtcp)) {
792 pxtcp_pcb_dissociate(pxtcp);
793 pollmgr_refptr_unref(pxtcp->rp);
794 pxtcp_free(pxtcp);
795 }
796 else {
797 DPRINTF2(("delete: pxtcp %p; pcb %p:"
798 " unacked %d, unsent %d, vacant %d, %s - DEFER!\n",
799 (void *)pxtcp, (void *)pxtcp->pcb,
800 (int)pxtcp->inbuf.unacked,
801 (int)pxtcp->inbuf.unsent,
802 (int)pxtcp->inbuf.vacant,
803 pxtcp->inbound_close_done ? "FIN sent" : "FIN is NOT sent"));
804
805 LWIP_ASSERT1(!pxtcp->deferred_delete);
806 pxtcp->deferred_delete = 1;
807 }
808}
809
810
811/**
812 * If we couldn't delete pxtcp right away in the msg_delete callback
813 * from the poll manager thread, we repeat the check at the end of
814 * relevant pcb callbacks.
815 */
816DECLINLINE(void)
817pxtcp_pcb_maybe_deferred_delete(struct pxtcp *pxtcp)
818{
819 if (pxtcp->deferred_delete && pxtcp_pcb_forward_inbound_done(pxtcp)) {
820 pxtcp_pcb_delete_pxtcp(pxtcp);
821 }
822}
823
824
825/**
826 * Poll manager callbacks should use this convenience wrapper to
827 * schedule pxtcp deletion on the lwip thread and to deregister from
828 * the poll manager.
829 */
830static int
831pxtcp_schedule_delete(struct pxtcp *pxtcp)
832{
833 /*
834 * If pollmgr_refptr_get() is called by any channel before
835 * scheduled deletion happens, let them know we are gone.
836 */
837 pxtcp->pmhdl.slot = -1;
838
839 /*
840 * Schedule deletion. Since poll manager thread may be pre-empted
841 * right after we send the message, the deletion may actually
842 * happen on the lwip thread before we return from this function,
843 * so it's not safe to refer to pxtcp after this call.
844 */
845 proxy_lwip_post(&pxtcp->msg_delete);
846
847 /* tell poll manager to deregister us */
848 return -1;
849}
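
/*
 * For illustration, a poll manager callback typically uses this as its
 * return value, e.g. "return pxtcp_schedule_delete(pxtcp);" as
 * pxtcp_pmgr_pump() does, so that the -1 it returns doubles as the
 * "deregister this handler" answer to the poll manager.
 */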
850
851
852/**
853 * Lwip thread callback invoked via pxtcp::msg_reset
854 *
855 * Like pxtcp_pcb_delete_pxtcp(), but sends RST to the guest before
856 * deleting this pxtcp.
857 */
858static void
859pxtcp_pcb_reset_pxtcp(void *ctx)
860{
861 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
862 LWIP_ASSERT1(pxtcp != NULL);
863
864 DPRINTF0(("%s: pxtcp %p, pcb %p, sock %d\n",
865 __func__, (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
866
867 if (pxtcp->sock != INVALID_SOCKET) {
868 proxy_reset_socket(pxtcp->sock);
869 pxtcp->sock = INVALID_SOCKET;
870 }
871
872 if (pxtcp->pcb != NULL) {
873 struct tcp_pcb *pcb = pxtcp->pcb;
874 pxtcp_pcb_dissociate(pxtcp);
875 tcp_abort(pcb);
876 }
877
878 pollmgr_refptr_unref(pxtcp->rp);
879 pxtcp_free(pxtcp);
880}
881
882
883
884/**
885 * Poll manager callbacks should use this convenience wrapper to
886 * schedule pxtcp reset and deletion on the lwip thread and to
887 * deregister from the poll manager.
888 *
889 * See pxtcp_schedule_delete() for additional comments.
890 */
891static int
892pxtcp_schedule_reset(struct pxtcp *pxtcp)
893{
894 pxtcp->pmhdl.slot = -1;
895 proxy_lwip_post(&pxtcp->msg_reset);
896 return -1;
897}
898
899
900/**
901 * Reject proxy connection attempt. Depending on the cause (sockerr)
902 * we may just drop the pcb silently, generate an ICMP datagram or
903 * send TCP reset.
904 */
905static void
906pxtcp_pcb_reject(struct tcp_pcb *pcb, int sockerr,
907 struct netif *netif, struct pbuf *p)
908{
909 int reset = 0;
910
911 if (sockerr == ECONNREFUSED) {
912 reset = 1;
913 }
914 else if (p != NULL) {
915 struct netif *oif;
916
917 LWIP_ASSERT1(netif != NULL);
918
919 oif = ip_current_netif();
920 ip_current_netif() = netif;
921
922 if (PCB_ISIPV6(pcb)) {
923 if (sockerr == EHOSTDOWN) {
924 icmp6_dest_unreach(p, ICMP6_DUR_ADDRESS); /* XXX: ??? */
925 }
926 else if (sockerr == EHOSTUNREACH
927 || sockerr == ENETDOWN
928 || sockerr == ENETUNREACH)
929 {
930 icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE);
931 }
932 }
933 else {
934 if (sockerr == EHOSTDOWN
935 || sockerr == EHOSTUNREACH
936 || sockerr == ENETDOWN
937 || sockerr == ENETUNREACH)
938 {
939 icmp_dest_unreach(p, ICMP_DUR_HOST);
940 }
941 }
942
943 ip_current_netif() = oif;
944 }
945
946 tcp_abandon(pcb, reset);
947}
948
949
950/**
951 * Called from poll manager thread via pxtcp::msg_accept when proxy
952 * failed to connect to the destination. Also called when we failed
953 * to register pxtcp with poll manager.
954 *
955 * This is like pxtcp_pcb_reset_pxtcp() but is more discriminate in
956 * how this unestablished connection is terminated.
957 */
958static void
959pxtcp_pcb_accept_refuse(void *ctx)
960{
961 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
962
963 DPRINTF0(("%s: pxtcp %p, pcb %p, sock %d: %R[sockerr]\n",
964 __func__, (void *)pxtcp, (void *)pxtcp->pcb,
965 pxtcp->sock, pxtcp->sockerr));
966
967 LWIP_ASSERT1(pxtcp != NULL);
968 LWIP_ASSERT1(pxtcp->sock == INVALID_SOCKET);
969
970 if (pxtcp->pcb != NULL) {
971 struct tcp_pcb *pcb = pxtcp->pcb;
972 pxtcp_pcb_dissociate(pxtcp);
973 pxtcp_pcb_reject(pcb, pxtcp->sockerr, pxtcp->netif, pxtcp->unsent);
974 }
975
976 pollmgr_refptr_unref(pxtcp->rp);
977 pxtcp_free(pxtcp);
978}
979
980
981/**
982 * Convenience wrapper for poll manager connect callback to reject
983 * connection attempt.
984 *
985 * Like pxtcp_schedule_reset(), but the callback is more discriminate
986 * in how this unestablished connection is terminated.
987 */
988static int
989pxtcp_schedule_reject(struct pxtcp *pxtcp)
990{
991 pxtcp->msg_accept.msg.cb.function = pxtcp_pcb_accept_refuse;
992 pxtcp->pmhdl.slot = -1;
993 proxy_lwip_post(&pxtcp->msg_accept);
994 return -1;
995}
996
997
998/**
999 * Global tcp_proxy_accept() callback for proxied outgoing TCP
1000 * connections from guest(s).
1001 */
1002static err_t
1003pxtcp_pcb_heard(void *arg, struct tcp_pcb *newpcb, err_t error)
1004{
1005 struct pbuf *p = (struct pbuf *)arg;
1006 struct pxtcp *pxtcp;
1007 ipX_addr_t dst_addr;
1008 int sdom;
1009 SOCKET sock;
1010 ssize_t nsent;
1011 int sockerr = 0;
1012
1013 LWIP_UNUSED_ARG(error); /* always ERR_OK */
1014
1015 /*
1016 * TCP first calls the accept callback when it receives the first SYN
1017 * and "tentatively accepts" a new proxied connection attempt. When
1018 * the proxy "confirms" the SYN and sends SYN|ACK, and the guest
1019 * replies with ACK, the accept callback is called again, this time
1020 * with the established connection.
1021 */
1022 LWIP_ASSERT1(newpcb->state == SYN_RCVD_0);
1023 tcp_accept(newpcb, pxtcp_pcb_accept);
1024 tcp_arg(newpcb, NULL);
1025
1026 tcp_setprio(newpcb, TCP_PRIO_MAX);
1027
1028 pxremap_outbound_ipX(PCB_ISIPV6(newpcb), &dst_addr, &newpcb->local_ip);
1029
1030 sdom = PCB_ISIPV6(newpcb) ? PF_INET6 : PF_INET;
1031 sock = proxy_connected_socket(sdom, SOCK_STREAM,
1032 &dst_addr, newpcb->local_port);
1033 if (sock == INVALID_SOCKET) {
1034 sockerr = SOCKERRNO();
1035 goto abort;
1036 }
1037
1038 pxtcp = pxtcp_allocate();
1039 if (pxtcp == NULL) {
1040 proxy_reset_socket(sock);
1041 goto abort;
1042 }
1043
1044 /* save initial datagram in case we need to reply with ICMP */
1045 pbuf_ref(p);
1046 pxtcp->unsent = p;
1047 pxtcp->netif = ip_current_netif();
1048
1049 pxtcp_pcb_associate(pxtcp, newpcb);
1050 pxtcp->sock = sock;
1051
1052 pxtcp->pmhdl.callback = pxtcp_pmgr_connect;
1053 pxtcp->events = POLLOUT;
1054
1055 nsent = pxtcp_chan_send(POLLMGR_CHAN_PXTCP_ADD, pxtcp);
1056 if (nsent < 0) {
1057 pxtcp->sock = INVALID_SOCKET;
1058 proxy_reset_socket(sock);
1059 pxtcp_pcb_accept_refuse(pxtcp);
1060 return ERR_ABRT;
1061 }
1062
1063 return ERR_OK;
1064
1065 abort:
1066 DPRINTF0(("%s: pcb %p, sock %d: %R[sockerr]\n",
1067 __func__, (void *)newpcb, sock, sockerr));
1068 pxtcp_pcb_reject(newpcb, sockerr, ip_current_netif(), p);
1069 return ERR_ABRT;
1070}
1071
1072
1073/**
1074 * tcp_proxy_accept() callback for accepted proxied outgoing TCP
1075 * connections from guest(s). This is "real" accept with three-way
1076 * handshake completed.
1077 */
1078static err_t
1079pxtcp_pcb_accept(void *arg, struct tcp_pcb *pcb, err_t error)
1080{
1081 struct pxtcp *pxtcp = (struct pxtcp *)arg;
1082
1083 LWIP_UNUSED_ARG(pcb); /* used only in asserts */
1084 LWIP_UNUSED_ARG(error); /* always ERR_OK */
1085
1086 LWIP_ASSERT1(pxtcp != NULL);
1087 LWIP_ASSERT1(pxtcp->pcb == pcb);
1088 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
1089
1090 /* send any inbound data that are already queued */
1091 pxtcp_pcb_forward_inbound(pxtcp);
1092 return ERR_OK;
1093}
1094
1095
1096/**
1097 * Initial poll manager callback for proxied outgoing TCP connections.
1098 * pxtcp_pcb_heard() sets pxtcp::pmhdl::callback to this.
1099 *
1100 * Waits for connect(2) to the destination to complete. On success
1101 * replaces itself with pxtcp_pmgr_pump() callback common to all
1102 * established TCP connections.
1103 */
1104static int
1105pxtcp_pmgr_connect(struct pollmgr_handler *handler, SOCKET fd, int revents)
1106{
1107 struct pxtcp *pxtcp;
1108
1109 pxtcp = (struct pxtcp *)handler->data;
1110 LWIP_ASSERT1(handler == &pxtcp->pmhdl);
1111 LWIP_ASSERT1(fd == pxtcp->sock);
1112 LWIP_ASSERT1(pxtcp->sockerr == 0);
1113
1114 if (revents & POLLNVAL) {
1115 pxtcp->sock = INVALID_SOCKET;
1116 pxtcp->sockerr = ETIMEDOUT;
1117 return pxtcp_schedule_reject(pxtcp);
1118 }
1119
1120 /*
1121 * Solaris and NetBSD don't report either POLLERR or POLLHUP when
1122 * connect(2) fails, just POLLOUT. In that case we always need to
1123 * check SO_ERROR.
1124 */
1125#if defined(RT_OS_SOLARIS) || defined(RT_OS_NETBSD)
1126# define CONNECT_CHECK_ERROR POLLOUT
1127#else
1128# define CONNECT_CHECK_ERROR (POLLERR | POLLHUP)
1129#endif
1130
1131 /*
1132 * Check the cause of the failure so that pxtcp_pcb_reject() may
1133 * behave accordingly.
1134 */
1135 if (revents & CONNECT_CHECK_ERROR) {
1136 socklen_t optlen = (socklen_t)sizeof(pxtcp->sockerr);
1137 int status;
1138 SOCKET s;
1139
1140 status = getsockopt(pxtcp->sock, SOL_SOCKET, SO_ERROR,
1141 (char *)&pxtcp->sockerr, &optlen);
1142 if (RT_UNLIKELY(status == SOCKET_ERROR)) { /* should not happen */
1143 DPRINTF(("%s: sock %d: SO_ERROR failed: %R[sockerr]\n",
1144 __func__, fd, SOCKERRNO()));
1145 pxtcp->sockerr = ETIMEDOUT;
1146 }
1147 else {
1148 /* don't spam this log on successful connect(2) */
1149 if ((revents & (POLLERR | POLLHUP)) /* we were told it's failed */
1150 || pxtcp->sockerr != 0) /* we determined it's failed */
1151 {
1152 DPRINTF(("%s: sock %d: connect: %R[sockerr]\n",
1153 __func__, fd, pxtcp->sockerr));
1154 }
1155
1156 if ((revents & (POLLERR | POLLHUP))
1157 && RT_UNLIKELY(pxtcp->sockerr == 0))
1158 {
1159 /* if we're told it's failed, make sure it's marked as such */
1160 pxtcp->sockerr = ETIMEDOUT;
1161 }
1162 }
1163
1164 if (pxtcp->sockerr != 0) {
1165 s = pxtcp->sock;
1166 pxtcp->sock = INVALID_SOCKET;
1167 closesocket(s);
1168 return pxtcp_schedule_reject(pxtcp);
1169 }
1170 }
1171
1172 if (revents & POLLOUT) { /* connect is successful */
1173 /* confirm accept to the guest */
1174 proxy_lwip_post(&pxtcp->msg_accept);
1175
1176 /*
1177 * Switch to common callback used for all established proxied
1178 * connections.
1179 */
1180 pxtcp->pmhdl.callback = pxtcp_pmgr_pump;
1181
1182 /*
1183 * Initially we poll for incoming traffic only. Outgoing
1184 * traffic is fast-forwarded by pxtcp_pcb_recv(); if it fails
1185 * it will ask us to poll for POLLOUT too.
1186 */
1187 pxtcp->events = POLLIN;
1188 return pxtcp->events;
1189 }
1190
1191 /* should never get here */
1192 DPRINTF0(("%s: pxtcp %p, sock %d: unexpected revents 0x%x\n",
1193 __func__, (void *)pxtcp, fd, revents));
1194 return pxtcp_schedule_reset(pxtcp);
1195}
1196
1197
1198/**
1199 * Called from poll manager thread via pxtcp::msg_accept when proxy
1200 * connected to the destination. Finalize accept by sending SYN|ACK
1201 * to the guest.
1202 */
1203static void
1204pxtcp_pcb_accept_confirm(void *ctx)
1205{
1206 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
1207 err_t error;
1208
1209 LWIP_ASSERT1(pxtcp != NULL);
1210 if (pxtcp->pcb == NULL) {
1211 return;
1212 }
1213
1214 /* we are not going to reply with ICMP, so we can drop initial pbuf */
1215 LWIP_ASSERT1(pxtcp->unsent != NULL);
1216 pbuf_free(pxtcp->unsent);
1217 pxtcp->unsent = NULL;
1218
1219 error = tcp_proxy_accept_confirm(pxtcp->pcb);
1220
1221 /*
1222 * If lwIP failed to enqueue SYN|ACK because it's out of pbufs it
1223 * abandons the pcb. Retrying that is not very easy, since it
1224 * would require keeping "fractional state". From the guest's point
1225 * of view there is no reply to its SYN, so it will either resend
1226 * the SYN (effectively triggering a full connection retry for us),
1227 * or it will eventually time out.
1228 */
1229 if (error == ERR_ABRT) {
1230 pxtcp->pcb = NULL; /* pcb is gone */
1231 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_RESET, pxtcp);
1232 }
1233
1234 /*
1235 * else if (error != ERR_OK): even if tcp_output() failed with
1236 * ERR_MEM - don't give up, that SYN|ACK is enqueued and will be
1237 * retransmitted eventually.
1238 */
1239}
1240
1241
1242/**
1243 * Entry point for port-forwarding.
1244 *
1245 * fwtcp accepts a new incoming connection, creates a pxtcp for the socket
1246 * (with no pcb yet) and adds it to the poll manager (polling for
1247 * errors only). Then it calls this function to construct the pcb and
1248 * perform the connection to the guest.
1249 */
1250void
1251pxtcp_pcb_connect(struct pxtcp *pxtcp, const struct fwspec *fwspec)
1252{
1253 struct sockaddr_storage ss;
1254 socklen_t sslen;
1255 struct tcp_pcb *pcb;
1256 ipX_addr_t src_addr, dst_addr;
1257 u16_t src_port, dst_port;
1258 int status;
1259 err_t error;
1260
1261 LWIP_ASSERT1(pxtcp != NULL);
1262 LWIP_ASSERT1(pxtcp->pcb == NULL);
1263 LWIP_ASSERT1(fwspec->stype == SOCK_STREAM);
1264
1265 pcb = tcp_new();
1266 if (pcb == NULL) {
1267 goto reset;
1268 }
1269
1270 tcp_setprio(pcb, TCP_PRIO_MAX);
1271 pxtcp_pcb_associate(pxtcp, pcb);
1272
1273 sslen = sizeof(ss);
1274 status = getpeername(pxtcp->sock, (struct sockaddr *)&ss, &sslen);
1275 if (status == SOCKET_ERROR) {
1276 goto reset;
1277 }
1278
1279 /* nit: compares PF and AF, but they are the same everywhere */
1280 LWIP_ASSERT1(ss.ss_family == fwspec->sdom);
1281
1282 status = fwany_ipX_addr_set_src(&src_addr, (const struct sockaddr *)&ss);
1283 if (status == PXREMAP_FAILED) {
1284 goto reset;
1285 }
1286
1287 if (ss.ss_family == PF_INET) {
1288 const struct sockaddr_in *peer4 = (const struct sockaddr_in *)&ss;
1289
1290 src_port = peer4->sin_port;
1291
1292 memcpy(&dst_addr.ip4, &fwspec->dst.sin.sin_addr, sizeof(ip_addr_t));
1293 dst_port = fwspec->dst.sin.sin_port;
1294 }
1295 else { /* PF_INET6 */
1296 const struct sockaddr_in6 *peer6 = (const struct sockaddr_in6 *)&ss;
1297 ip_set_v6(pcb, 1);
1298
1299 src_port = peer6->sin6_port;
1300
1301 memcpy(&dst_addr.ip6, &fwspec->dst.sin6.sin6_addr, sizeof(ip6_addr_t));
1302 dst_port = fwspec->dst.sin6.sin6_port;
1303 }
1304
1305 /* lwip port arguments are in host order */
1306 src_port = ntohs(src_port);
1307 dst_port = ntohs(dst_port);
1308
1309 error = tcp_proxy_bind(pcb, ipX_2_ip(&src_addr), src_port);
1310 if (error != ERR_OK) {
1311 goto reset;
1312 }
1313
1314 error = tcp_connect(pcb, ipX_2_ip(&dst_addr), dst_port,
1315 /* callback: */ pxtcp_pcb_connected);
1316 if (error != ERR_OK) {
1317 goto reset;
1318 }
1319
1320 return;
1321
1322 reset:
1323 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_RESET, pxtcp);
1324}
1325
1326
1327/**
1328 * Port-forwarded connection to guest is successful, pump data.
1329 */
1330static err_t
1331pxtcp_pcb_connected(void *arg, struct tcp_pcb *pcb, err_t error)
1332{
1333 struct pxtcp *pxtcp = (struct pxtcp *)arg;
1334
1335 LWIP_ASSERT1(error == ERR_OK); /* always called with ERR_OK */
1336 LWIP_UNUSED_ARG(error);
1337
1338 LWIP_ASSERT1(pxtcp != NULL);
1339 LWIP_ASSERT1(pxtcp->pcb == pcb);
1340 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
1341 LWIP_UNUSED_ARG(pcb);
1342
1343 DPRINTF0(("%s: new pxtcp %p; pcb %p; sock %d\n",
1344 __func__, (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
1345
1346 /* ACK on connection is like ACK on data in pxtcp_pcb_sent() */
1347 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_POLLIN, pxtcp);
1348
1349 return ERR_OK;
1350}
1351
1352
1353/**
1354 * tcp_recv() callback.
1355 */
1356static err_t
1357pxtcp_pcb_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t error)
1358{
1359 struct pxtcp *pxtcp = (struct pxtcp *)arg;
1360
1361 LWIP_ASSERT1(error == ERR_OK); /* always called with ERR_OK */
1362 LWIP_UNUSED_ARG(error);
1363
1364 LWIP_ASSERT1(pxtcp != NULL);
1365 LWIP_ASSERT1(pxtcp->pcb == pcb);
1366 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
1367 LWIP_UNUSED_ARG(pcb);
1368
1369
1370 /*
1371 * Have we done sending previous batch?
1372 */
1373 if (pxtcp->unsent != NULL) {
1374 if (p != NULL) {
1375 /*
1376 * Return an error to tell TCP to hold onto that pbuf.
1377 * It will be presented to us later from tcp_fasttmr().
1378 */
1379 return ERR_WOULDBLOCK;
1380 }
1381 else {
1382 /*
1383 * Unlike data, p == NULL indicating orderly shutdown is
1384 * NOT presented to us again
1385 */
1386 pxtcp->outbound_close = 1;
1387 return ERR_OK;
1388 }
1389 }
1390
1391
1392 /*
1393 * Guest closed?
1394 */
1395 if (p == NULL) {
1396 pxtcp->outbound_close = 1;
1397 pxtcp_pcb_forward_outbound_close(pxtcp);
1398 return ERR_OK;
1399 }
1400
1401
1402 /*
1403 * Got data, send what we can without blocking.
1404 */
1405 return pxtcp_pcb_forward_outbound(pxtcp, p);
1406}
1407
1408
1409/**
1410 * Guest half-closed its TX side of the connection.
1411 *
1412 * Called either immediately from pxtcp_pcb_recv() when it gets NULL,
1413 * or from pxtcp_pcb_forward_outbound() when it finishes forwarding
1414 * previously unsent data and sees pxtcp::outbound_close flag saved by
1415 * pxtcp_pcb_recv().
1416 */
1417static void
1418pxtcp_pcb_forward_outbound_close(struct pxtcp *pxtcp)
1419{
1420 struct tcp_pcb *pcb;
1421
1422 LWIP_ASSERT1(pxtcp != NULL);
1423 LWIP_ASSERT1(pxtcp->outbound_close);
1424 LWIP_ASSERT1(!pxtcp->outbound_close_done);
1425
1426 pcb = pxtcp->pcb;
1427 LWIP_ASSERT1(pcb != NULL);
1428
1429 DPRINTF(("outbound_close: pxtcp %p; pcb %p %s\n",
1430 (void *)pxtcp, (void *)pcb, tcp_debug_state_str(pcb->state)));
1431
1432
1433 /* set the flag first, since shutdown() may trigger POLLHUP */
1434 pxtcp->outbound_close_done = 1;
1435 shutdown(pxtcp->sock, SHUT_WR); /* half-close the socket */
1436
1437#if !(HAVE_TCP_POLLHUP & POLLOUT)
1438 /*
1439 * We need to nudge poll manager manually, since OS will not
1440 * report POLLHUP.
1441 */
1442 if (pxtcp->inbound_close) {
1443 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_DEL, pxtcp);
1444 }
1445#endif
1446
1447
1448 /* no more outbound data coming to us */
1449 tcp_recv(pcb, NULL);
1450
1451 /*
1452 * If we have already done inbound close previously (active close
1453 * on the pcb), then we must not hold onto a pcb in TIME_WAIT
1454 * state since those will be recycled by lwip when it runs out of
1455 * free pcbs in the pool.
1456 *
1457 * The test is true also for a pcb in CLOSING state that waits
1458 * just for the ACK of its FIN (to transition to TIME_WAIT).
1459 */
1460 if (pxtcp_pcb_forward_inbound_done(pxtcp)) {
1461 pxtcp_pcb_dissociate(pxtcp);
1462 }
1463}
1464
1465
1466/**
1467 * Forward outbound data from pcb to socket.
1468 *
1469 * Called by pxtcp_pcb_recv() to forward new data and by callout
1470 * triggered by POLLOUT on the socket to send previously unsent data.
1471 *
1472 * (Re)schedules a one-time callout if not all data are sent.
1473 */
1474static err_t
1475pxtcp_pcb_forward_outbound(struct pxtcp *pxtcp, struct pbuf *p)
1476{
1477 struct pbuf *qs, *q;
1478 size_t qoff;
1479 size_t forwarded;
1480 int sockerr;
1481
1482 LWIP_ASSERT1(pxtcp->unsent == NULL || pxtcp->unsent == p);
1483
1484 forwarded = 0;
1485 sockerr = 0;
1486
1487 q = NULL;
1488 qoff = 0;
1489
1490 qs = p;
1491 while (qs != NULL) {
1492 IOVEC iov[8];
1493 const size_t iovsize = sizeof(iov)/sizeof(iov[0]);
1494 size_t fwd1;
1495 ssize_t nsent;
1496 size_t i;
1497
1498 fwd1 = 0;
1499 for (i = 0, q = qs; i < iovsize && q != NULL; ++i, q = q->next) {
1500 LWIP_ASSERT1(q->len > 0);
1501 IOVEC_SET_BASE(iov[i], q->payload);
1502 IOVEC_SET_LEN(iov[i], q->len);
1503 fwd1 += q->len;
1504 }
1505
1506 /*
1507 * TODO: This is where an application-level proxy can hook in
1508 * to process outbound traffic.
1509 */
1510 nsent = pxtcp_sock_send(pxtcp, iov, i);
1511
1512 if (nsent == (ssize_t)fwd1) {
1513 /* successfully sent this chain fragment completely */
1514 forwarded += nsent;
1515 qs = q;
1516 }
1517 else if (nsent >= 0) {
1518 /* successfully sent only some data */
1519 forwarded += nsent;
1520
1521 /* find the first pbuf that was not completely forwarded */
1522 qoff = nsent;
1523 for (i = 0, q = qs; i < iovsize && q != NULL; ++i, q = q->next) {
1524 if (qoff < q->len) {
1525 break;
1526 }
1527 qoff -= q->len;
1528 }
1529 LWIP_ASSERT1(q != NULL);
1530 LWIP_ASSERT1(qoff < q->len);
1531 break;
1532 }
1533 else {
1534 sockerr = -nsent;
1535
1536 /*
1537 * Some errors are really not errors - if we get them,
1538 * it's not different from getting nsent == 0, so filter
1539 * them out here.
1540 */
1541 if (proxy_error_is_transient(sockerr)) {
1542 sockerr = 0;
1543 }
1544 q = qs;
1545 qoff = 0;
1546 break;
1547 }
1548 }
1549
1550 if (forwarded > 0) {
1551 DPRINTF2(("forward_outbound: pxtcp %p, pcb %p: sent %d bytes\n",
1552 (void *)pxtcp, (void *)pxtcp->pcb, (int)forwarded));
1553 tcp_recved(pxtcp->pcb, (u16_t)forwarded);
1554 }
1555
1556 if (q == NULL) { /* everything is forwarded? */
1557 LWIP_ASSERT1(sockerr == 0);
1558 LWIP_ASSERT1(forwarded == p->tot_len);
1559
1560 pxtcp->unsent = NULL;
1561 pbuf_free(p);
1562 if (pxtcp->outbound_close) {
1563 pxtcp_pcb_forward_outbound_close(pxtcp);
1564 }
1565 }
1566 else {
1567 if (q != p) {
1568 /* free forwarded pbufs at the beginning of the chain */
1569 pbuf_ref(q);
1570 pbuf_free(p);
1571 }
1572 if (qoff > 0) {
1573 /* advance payload pointer past the forwarded part */
1574 pbuf_header(q, -(s16_t)qoff);
1575 }
1576 pxtcp->unsent = q;
1577 DPRINTF2(("forward_outbound: pxtcp %p, pcb %p: kept %d bytes\n",
1578 (void *)pxtcp, (void *)pxtcp->pcb, (int)q->tot_len));
1579
1580 /*
1581 * Has sendmsg() failed?
1582 *
1583 * Connection reset will be detected by poll and
1584 * pxtcp_schedule_reset() will be called.
1585 *
1586 * Otherwise something *really* unexpected must have happened,
1587 * so we'd better abort.
1588 */
1589 if (sockerr != 0 && sockerr != ECONNRESET) {
1590 struct tcp_pcb *pcb = pxtcp->pcb;
1591 DPRINTF2(("forward_outbound: pxtcp %p, pcb %p: %R[sockerr]\n",
1592 (void *)pxtcp, (void *)pcb, sockerr));
1593
1594 pxtcp_pcb_dissociate(pxtcp);
1595
1596 tcp_abort(pcb);
1597
1598 /* call error callback manually since we've already dissociated */
1599 pxtcp_pcb_err((void *)pxtcp, ERR_ABRT);
1600 return ERR_ABRT;
1601 }
1602
1603 /* schedule one-shot POLLOUT on the socket */
1604 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_POLLOUT, pxtcp);
1605 }
1606 return ERR_OK;
1607}
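
/*
 * Worked example (illustrative numbers only): suppose the chain is two
 * pbufs of 100 and 60 bytes and sendmsg() accepts 130 bytes.  Then
 * forwarded == 130, the scan above lands on the second pbuf with
 * qoff == 30, pbuf_ref(q)/pbuf_free(p) drops the fully sent head,
 * pbuf_header(q, -30) hides the 30 bytes already sent, and the
 * remaining 30 bytes stay in pxtcp->unsent until the scheduled POLLOUT
 * leads to pxtcp_pcb_write_outbound() retrying the send.
 */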
1608
1609
1610#if !defined(RT_OS_WINDOWS)
1611static ssize_t
1612pxtcp_sock_send(struct pxtcp *pxtcp, IOVEC *iov, size_t iovlen)
1613{
1614 struct msghdr mh;
1615 ssize_t nsent;
1616
1617#ifdef MSG_NOSIGNAL
1618 const int send_flags = MSG_NOSIGNAL;
1619#else
1620 const int send_flags = 0;
1621#endif
1622
1623 memset(&mh, 0, sizeof(mh));
1624
1625 mh.msg_iov = iov;
1626 mh.msg_iovlen = iovlen;
1627
1628 nsent = sendmsg(pxtcp->sock, &mh, send_flags);
1629 if (nsent < 0) {
1630 nsent = -SOCKERRNO();
1631 }
1632
1633 return nsent;
1634}
1635#else /* RT_OS_WINDOWS */
1636static ssize_t
1637pxtcp_sock_send(struct pxtcp *pxtcp, IOVEC *iov, size_t iovlen)
1638{
1639 DWORD nsent;
1640 int status;
1641
1642 status = WSASend(pxtcp->sock, iov, (DWORD)iovlen, &nsent,
1643 0, NULL, NULL);
1644 if (status == SOCKET_ERROR) {
1645 return -SOCKERRNO();
1646 }
1647
1648 return nsent;
1649}
1650#endif /* RT_OS_WINDOWS */
1651
1652
1653/**
1654 * Callback from poll manager (on POLLOUT) to send data from
1655 * pxtcp::unsent pbuf to socket.
1656 */
1657static void
1658pxtcp_pcb_write_outbound(void *ctx)
1659{
1660 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
1661 LWIP_ASSERT1(pxtcp != NULL);
1662
1663 if (pxtcp->pcb == NULL) {
1664 return;
1665 }
1666
1667 pxtcp_pcb_forward_outbound(pxtcp, pxtcp->unsent);
1668}
1669
1670
1671/**
1672 * Common poll manager callback used by both outgoing and incoming
1673 * (port-forwarded) connections that have a connected socket.
1674 */
1675static int
1676pxtcp_pmgr_pump(struct pollmgr_handler *handler, SOCKET fd, int revents)
1677{
1678 struct pxtcp *pxtcp;
1679 int status;
1680 int sockerr;
1681
1682 pxtcp = (struct pxtcp *)handler->data;
1683 LWIP_ASSERT1(handler == &pxtcp->pmhdl);
1684 LWIP_ASSERT1(fd == pxtcp->sock);
1685
1686 if (revents & POLLNVAL) {
1687 pxtcp->sock = INVALID_SOCKET;
1688 return pxtcp_schedule_reset(pxtcp);
1689 }
1690
1691 if (revents & POLLERR) {
1692 socklen_t optlen = (socklen_t)sizeof(sockerr);
1693
1694 status = getsockopt(pxtcp->sock, SOL_SOCKET, SO_ERROR,
1695 (char *)&sockerr, &optlen);
1696 if (status == SOCKET_ERROR) { /* should not happen */
1697 DPRINTF(("sock %d: SO_ERROR failed: %R[sockerr]\n",
1698 fd, SOCKERRNO()));
1699 }
1700 else {
1701 DPRINTF0(("sock %d: %R[sockerr]\n", fd, sockerr));
1702 }
1703 return pxtcp_schedule_reset(pxtcp);
1704 }
1705
1706 if (revents & POLLOUT) {
1707 pxtcp->events &= ~POLLOUT;
1708 proxy_lwip_post(&pxtcp->msg_outbound);
1709 }
1710
1711 if (revents & POLLIN) {
1712 ssize_t nread;
1713 int stop_pollin;
1714
1715 nread = pxtcp_sock_read(pxtcp, &stop_pollin);
1716 if (nread < 0) {
1717 sockerr = -(int)nread;
1718 DPRINTF0(("sock %d: %R[sockerr]\n", fd, sockerr));
1719 return pxtcp_schedule_reset(pxtcp);
1720 }
1721
1722 if (stop_pollin) {
1723 pxtcp->events &= ~POLLIN;
1724 }
1725
1726 if (nread > 0) {
1727 proxy_lwip_post(&pxtcp->msg_inbound);
1728#if !HAVE_TCP_POLLHUP
1729 /*
1730 * If host does not report POLLHUP for closed sockets
1731 * (e.g. NetBSD) we should check for full close manually.
1732 */
1733 if (pxtcp->inbound_close && pxtcp->outbound_close_done) {
1734 LWIP_ASSERT1((revents & POLLHUP) == 0);
1735 return pxtcp_schedule_delete(pxtcp);
1736 }
1737#endif
1738 }
1739 }
1740
1741#if !HAVE_TCP_POLLHUP
1742 LWIP_ASSERT1((revents & POLLHUP) == 0);
1743#else
1744 if (revents & POLLHUP) {
1745 DPRINTF(("sock %d: HUP\n", fd));
1746#if HAVE_TCP_POLLHUP == POLLIN
1747 /*
1748 * Remote closed inbound.
1749 */
1750 if (!pxtcp->outbound_close_done) {
1751 /*
1752 * We might still need to poll for POLLOUT, but we can not
1753 * poll for POLLIN anymore (even if not all data are read)
1754 * because we will be spammed by POLLHUP.
1755 */
1756 pxtcp->events &= ~POLLIN;
1757 if (!pxtcp->inbound_close) {
1758 /* the rest of the input has to be pulled */
1759 proxy_lwip_post(&pxtcp->msg_inpull);
1760 }
1761 }
1762 else
1763#endif
1764 /*
1765 * Both directions are closed.
1766 */
1767 {
1768 LWIP_ASSERT1(pxtcp->outbound_close_done);
1769
1770 if (pxtcp->inbound_close) {
1771 /* there's no unread data, we are done */
1772 return pxtcp_schedule_delete(pxtcp);
1773 }
1774 else {
1775 /* pull the rest of the input first (deferred_delete) */
1776 pxtcp->pmhdl.slot = -1;
1777 proxy_lwip_post(&pxtcp->msg_inpull);
1778 return -1;
1779 }
1780 /* NOTREACHED */
1781 }
1782
1783 }
1784#endif /* HAVE_TCP_POLLHUP */
1785
1786 return pxtcp->events;
1787}
1788
1789
1790/**
1791 * Read data from socket to ringbuf. This may be used both on lwip
1792 * and poll manager threads.
1793 *
1794 * The flag pointed to by pstop is set when further reading is impossible,
1795 * either temporarily when the buffer is full, or permanently when EOF is
1796 * received.
1797 *
1798 * Returns number of bytes read. NB: EOF is reported as 1!
1799 *
1800 * Returns zero if nothing was read, either because buffer is full, or
1801 * if no data is available (EWOULDBLOCK, EINTR &c).
1802 *
1803 * Returns -errno on real socket errors.
1804 */
1805static ssize_t
1806pxtcp_sock_read(struct pxtcp *pxtcp, int *pstop)
1807{
1808 IOVEC iov[2];
1809 size_t iovlen;
1810 ssize_t nread;
1811
1812 const size_t sz = pxtcp->inbuf.bufsize;
1813 size_t beg, lim, wrnew;
1814
1815 *pstop = 0;
1816
1817 beg = pxtcp->inbuf.vacant;
1818 IOVEC_SET_BASE(iov[0], &pxtcp->inbuf.buf[beg]);
1819
1820 /* lim is the index we can NOT write to */
1821 lim = pxtcp->inbuf.unacked;
1822 if (lim == 0) {
1823 lim = sz - 1; /* empty slot at the end */
1824 }
1825 else if (lim == 1 && beg != 0) {
1826 lim = sz; /* empty slot at the beginning */
1827 }
1828 else {
1829 --lim;
1830 }
1831
1832 if (beg == lim) {
1833 /*
1834 * Buffer is full, stop polling for POLLIN.
1835 *
1836 * pxtcp_pcb_sent() will re-enable POLLIN when guest ACKs
1837 * data, freeing space in the ring buffer.
1838 */
1839 *pstop = 1;
1840 return 0;
1841 }
1842
1843 if (beg < lim) {
1844 /* free space in one chunk */
1845 iovlen = 1;
1846 IOVEC_SET_LEN(iov[0], lim - beg);
1847 }
1848 else {
1849 /* free space in two chunks */
1850 iovlen = 2;
1851 IOVEC_SET_LEN(iov[0], sz - beg);
1852 IOVEC_SET_BASE(iov[1], &pxtcp->inbuf.buf[0]);
1853 IOVEC_SET_LEN(iov[1], lim);
1854 }
1855
1856 /*
1857 * TODO: This is where an application-level proxy can hook in to
1858 * process inbound traffic.
1859 */
1860 nread = pxtcp_sock_recv(pxtcp, iov, iovlen);
1861
1862 if (nread > 0) {
1863 wrnew = beg + nread;
1864 if (wrnew >= sz) {
1865 wrnew -= sz;
1866 }
1867 pxtcp->inbuf.vacant = wrnew;
1868 DPRINTF2(("pxtcp %p: sock %d read %d bytes\n",
1869 (void *)pxtcp, pxtcp->sock, (int)nread));
1870 return nread;
1871 }
1872 else if (nread == 0) {
1873 *pstop = 1;
1874 pxtcp->inbound_close = 1;
1875 DPRINTF2(("pxtcp %p: sock %d read EOF\n",
1876 (void *)pxtcp, pxtcp->sock));
1877 return 1;
1878 }
1879 else {
1880 int sockerr = -nread;
1881
1882 if (proxy_error_is_transient(sockerr)) {
1883 /* haven't read anything, just return */
1884 DPRINTF2(("pxtcp %p: sock %d read cancelled\n",
1885 (void *)pxtcp, pxtcp->sock));
1886 return 0;
1887 }
1888 else {
1889 /* socket error! */
1890 DPRINTF0(("pxtcp %p: sock %d read: %R[sockerr]\n",
1891 (void *)pxtcp, pxtcp->sock, sockerr));
1892 return -sockerr;
1893 }
1894 }
1895}
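
/*
 * For illustration of the return convention: in pxtcp_pmgr_pump() above
 * EOF is handled by the same "nread > 0" branch as data, because
 * pxtcp_sock_read() reports EOF as 1.  That way msg_inbound is still
 * posted and the lwIP thread gets to forward the half-close
 * (inbound_close) to the guest via pxtcp_pcb_forward_inbound().
 */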
1896
1897
1898#if !defined(RT_OS_WINDOWS)
1899static ssize_t
1900pxtcp_sock_recv(struct pxtcp *pxtcp, IOVEC *iov, size_t iovlen)
1901{
1902 struct msghdr mh;
1903 ssize_t nread;
1904
1905 memset(&mh, 0, sizeof(mh));
1906
1907 mh.msg_iov = iov;
1908 mh.msg_iovlen = iovlen;
1909
1910 nread = recvmsg(pxtcp->sock, &mh, 0);
1911 if (nread < 0) {
1912 nread = -SOCKERRNO();
1913 }
1914
1915 return nread;
1916}
1917#else /* RT_OS_WINDOWS */
1918static ssize_t
1919pxtcp_sock_recv(struct pxtcp *pxtcp, IOVEC *iov, size_t iovlen)
1920{
1921 DWORD flags;
1922 DWORD nread;
1923 int status;
1924
1925 flags = 0;
1926 status = WSARecv(pxtcp->sock, iov, (DWORD)iovlen, &nread,
1927 &flags, NULL, NULL);
1928 if (status == SOCKET_ERROR) {
1929 return -SOCKERRNO();
1930 }
1931
1932 return (ssize_t)nread;
1933}
1934#endif /* RT_OS_WINDOWS */
1935
1936
1937/**
1938 * Callback from poll manager (pxtcp::msg_inbound) to trigger output
1939 * from ringbuf to guest.
1940 */
1941static void
1942pxtcp_pcb_write_inbound(void *ctx)
1943{
1944 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
1945 LWIP_ASSERT1(pxtcp != NULL);
1946
1947 if (pxtcp->pcb == NULL) {
1948 return;
1949 }
1950
1951 pxtcp_pcb_forward_inbound(pxtcp);
1952}
1953
1954
1955/**
1956 * tcp_poll() callback
1957 *
1958 * We switch it on when tcp_write() or tcp_shutdown() fail with
1959 * ERR_MEM to prevent the connection from stalling. If there are ACKs or
1960 * more inbound data then pxtcp_pcb_forward_inbound() will be
1961 * triggered again, but if neither happens, tcp_poll() comes to the
1962 * rescue.
1963 */
1964static err_t
1965pxtcp_pcb_poll(void *arg, struct tcp_pcb *pcb)
1966{
1967 struct pxtcp *pxtcp = (struct pxtcp *)arg;
1968 LWIP_UNUSED_ARG(pcb);
1969
1970 DPRINTF2(("%s: pxtcp %p; pcb %p\n",
1971 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
1972
1973 pxtcp_pcb_forward_inbound(pxtcp);
1974
1975 /*
1976 * If the last thing holding up deletion of the pxtcp was failed
1977 * tcp_shutdown() and it succeeded, we may be the last callback.
1978 */
1979 pxtcp_pcb_maybe_deferred_delete(pxtcp);
1980
1981 return ERR_OK;
1982}
1983
1984
1985static void
1986pxtcp_pcb_schedule_poll(struct pxtcp *pxtcp)
1987{
1988 tcp_poll(pxtcp->pcb, pxtcp_pcb_poll, 0);
1989}
1990
1991
1992static void
1993pxtcp_pcb_cancel_poll(struct pxtcp *pxtcp)
1994{
1995 tcp_poll(pxtcp->pcb, NULL, 255);
1996}
1997
1998
1999/**
2000 * Forward inbound data from ring buffer to the guest.
2001 *
2002 * Scheduled by poll manager thread after it receives more data into
2003 * the ring buffer (we have more data to send).
2004 *
2005 * Also called from tcp_sent() callback when guest ACKs some data,
2006 * increasing pcb->snd_buf (we are permitted to send more data).
2007 *
2008 * Also called from tcp_poll() callback if previous attempt to forward
2009 * inbound data failed with ERR_MEM (we need to try again).
2010 */
2011static void
2012pxtcp_pcb_forward_inbound(struct pxtcp *pxtcp)
2013{
2014 struct tcp_pcb *pcb;
2015 size_t sndbuf;
2016 size_t beg, lim, sndlim;
2017 size_t toeob, tolim;
2018 size_t nsent;
2019 err_t error;
2020
2021 LWIP_ASSERT1(pxtcp != NULL);
2022 pcb = pxtcp->pcb;
2023 if (pcb == NULL) {
2024 return;
2025 }
2026
2027 if (/* __predict_false */ pcb->state < ESTABLISHED) {
2028 /*
2029 * If we have just confirmed accept of this connection, the
2030 * pcb is in SYN_RCVD state and we still haven't received the
2031 * ACK of our SYN. It's only in SYN_RCVD -> ESTABLISHED
2032 * transition that lwip decrements pcb->acked so that that ACK
2033 * is not reported to pxtcp_pcb_sent(). If we send something
2034 * now and immediately close (think "daytime", e.g.) while
2035 * still in SYN_RCVD state, we will move directly to
2036 * FIN_WAIT_1 and when our confirming SYN is ACK'ed lwip will
2037 * report it to pxtcp_pcb_sent().
2038 */
2039 DPRINTF2(("forward_inbound: pxtcp %p; pcb %p %s - later...\n",
2040 (void *)pxtcp, (void *)pcb, tcp_debug_state_str(pcb->state)));
2041 return;
2042 }
2043
2044
2045 beg = pxtcp->inbuf.unsent; /* private to lwip thread */
2046 lim = pxtcp->inbuf.vacant;
2047
2048 if (beg == lim) {
2049 if (pxtcp->inbound_close && !pxtcp->inbound_close_done) {
2050 pxtcp_pcb_forward_inbound_close(pxtcp);
2051 tcp_output(pcb);
2052 return;
2053 }
2054
2055 /*
2056 * Else, there's no data to send.
2057 *
2058 * If there is free space in the buffer, producer will
2059 * reschedule us as it receives more data and vacant (lim)
2060 * advances.
2061 *
2062 * If the buffer is full (all data have been passed to
2063 * tcp_write() but not yet acknowledged), we will advance
2064 * unacked on ACK, freeing some space for producer to write to
2065 * (then see above).
2066 */
2067 return;
2068 }
2069
2070 sndbuf = tcp_sndbuf(pcb);
2071 if (sndbuf == 0) {
2072 /*
2073 * Can't send anything now. As guest ACKs some data, TCP will
2074 * call pxtcp_pcb_sent() callback and we will come here again.
2075 */
2076 return;
2077 }
2078
2079 nsent = 0;
2080
2081 /*
2082 * We have three limits to consider:
2083 * - how much data we have in the ringbuf
2084 * - how much data we are allowed to send
2085 * - ringbuf size
2086 */
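    /*
     * A worked example of the wrap-around branch below: with bufsize 16,
     * beg (unsent) 12 and lim (vacant) 4 the pending data wraps, so
     * toeob is 4.  If sndbuf is 10, we first tcp_write() the 4 bytes
     * [12..16) with TCP_WRITE_FLAG_MORE, wrap beg to 0 and fall through
     * to write min(lim, sndbuf - toeob) = 4 more bytes from the start of
     * the buffer.  If instead sndbuf is 3 (< toeob), beg cannot wrap, so
     * lim is clamped to beg + sndbuf and the straight-line path below
     * writes those 3 bytes.
     */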
2087 toeob = pxtcp->inbuf.bufsize - beg;
2088 if (lim < beg) { /* lim wrapped */
2089 if (sndbuf < toeob) { /* but we are limited by sndbuf */
2090 /* so beg is not going to wrap, treat sndbuf as lim */
2091 lim = beg + sndbuf; /* ... and proceed to the simple case */
2092 }
2093 else { /* we are limited by the end of the buffer, beg will wrap */
2094 u8_t maybemore;
2095 if (toeob == sndbuf || lim == 0) {
2096 maybemore = 0;
2097 }
2098 else {
2099 maybemore = TCP_WRITE_FLAG_MORE;
2100 }
2101
2102 error = tcp_write(pcb, &pxtcp->inbuf.buf[beg], toeob, maybemore);
2103 if (error != ERR_OK) {
2104 goto writeerr;
2105 }
2106 nsent += toeob;
2107 pxtcp->inbuf.unsent = 0; /* wrap */
2108
2109 if (maybemore) {
2110 beg = 0;
2111 sndbuf -= toeob;
2112 }
2113 else {
2114 /* we are done sending, but ... */
2115 goto check_inbound_close;
2116 }
2117 }
2118 }
2119
2120 LWIP_ASSERT1(beg < lim);
2121 sndlim = beg + sndbuf;
2122 if (lim > sndlim) {
2123 lim = sndlim;
2124 }
2125 tolim = lim - beg;
2126 if (tolim > 0) {
2127 error = tcp_write(pcb, &pxtcp->inbuf.buf[beg], (u16_t)tolim, 0);
2128 if (error != ERR_OK) {
2129 goto writeerr;
2130 }
2131 nsent += tolim;
2132 pxtcp->inbuf.unsent = lim;
2133 }
2134
2135 check_inbound_close:
2136 if (pxtcp->inbound_close && pxtcp->inbuf.unsent == pxtcp->inbuf.vacant) {
2137 pxtcp_pcb_forward_inbound_close(pxtcp);
2138 }
2139
2140 DPRINTF2(("forward_inbound: pxtcp %p, pcb %p: sent %d bytes\n",
2141 (void *)pxtcp, (void *)pcb, (int)nsent));
2142 tcp_output(pcb);
2143 pxtcp_pcb_cancel_poll(pxtcp);
2144 return;
2145
2146 writeerr:
2147 if (error == ERR_MEM) {
2148 if (nsent > 0) { /* first write succeeded, second failed */
2149 DPRINTF2(("forward_inbound: pxtcp %p, pcb %p: sent %d bytes only\n",
2150 (void *)pxtcp, (void *)pcb, (int)nsent));
2151 tcp_output(pcb);
2152 }
2153 DPRINTF(("forward_inbound: pxtcp %p, pcb %p: ERR_MEM\n",
2154 (void *)pxtcp, (void *)pcb));
2155 pxtcp_pcb_schedule_poll(pxtcp);
2156 }
2157 else {
2158 DPRINTF(("forward_inbound: pxtcp %p, pcb %p: %s\n",
2159 (void *)pxtcp, (void *)pcb, proxy_lwip_strerr(error)));
2160
2161 /* XXX: We shouldn't get ERR_ARG. Check ERR_CONN conditions early? */
2162 LWIP_ASSERT1(error == ERR_MEM);
2163 }
2164}
2165
2166
2167static void
2168pxtcp_pcb_forward_inbound_close(struct pxtcp *pxtcp)
2169{
2170 struct tcp_pcb *pcb;
2171 err_t error;
2172
2173 LWIP_ASSERT1(pxtcp != NULL);
2174 LWIP_ASSERT1(pxtcp->inbound_close);
2175 LWIP_ASSERT1(!pxtcp->inbound_close_done);
2176 LWIP_ASSERT1(pxtcp->inbuf.unsent == pxtcp->inbuf.vacant);
2177
2178 pcb = pxtcp->pcb;
2179 LWIP_ASSERT1(pcb != NULL);
2180
2181 DPRINTF(("inbound_close: pxtcp %p; pcb %p: %s\n",
2182 (void *)pxtcp, (void *)pcb, tcp_debug_state_str(pcb->state)));
2183
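    /*
     * The shutdown below is TX-only (shut_rx = 0, shut_tx = 1): lwip
     * queues a FIN towards the guest while our receive side stays open,
     * so the guest may keep sending and its data is still forwarded to
     * the host socket.
     */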
2184 error = tcp_shutdown(pcb, /*RX*/ 0, /*TX*/ 1);
2185 if (error != ERR_OK) {
2186 DPRINTF(("inbound_close: pxtcp %p; pcb %p:"
2187 " tcp_shutdown: error=%s\n",
2188 (void *)pxtcp, (void *)pcb, proxy_lwip_strerr(error)));
2189 pxtcp_pcb_schedule_poll(pxtcp);
2190 return;
2191 }
2192
2193 pxtcp_pcb_cancel_poll(pxtcp);
2194 pxtcp->inbound_close_done = 1;
2195
2196
2197 /*
2198 * If we have already done outbound close previously (passive
2199 * close on the pcb), then we must not hold onto a pcb in LAST_ACK
2200 * state since those will be deleted by lwip when that last ack
2201 * comes from the guest.
2202 *
2203 * NB: We do NOT check for deferred delete here, even though we
2204 * have just set one of its conditions, inbound_close_done. We
2205 * let pcb callbacks that called us do that. It's simpler and
2206 * cleaner that way.
2207 */
2208 if (pxtcp->outbound_close_done && pxtcp_pcb_forward_inbound_done(pxtcp)) {
2209 pxtcp_pcb_dissociate(pxtcp);
2210 }
2211}
2212
2213
2214/**
2215 * Check that all forwarded inbound data is sent and acked, and that
2216 * inbound close is scheduled (we aren't called back when it's acked).
2217 */
2218DECLINLINE(int)
2219pxtcp_pcb_forward_inbound_done(const struct pxtcp *pxtcp)
2220{
2221 return (pxtcp->inbound_close_done /* also implies that all data forwarded */
2222 && pxtcp->inbuf.unacked == pxtcp->inbuf.unsent);
2223}
2224
2225
2226/**
2227 * tcp_sent() callback - guest acknowledged len bytes.
2228 *
2229 * We can advance inbuf::unacked index, making more free space in the
2230 * ringbuf and wake up producer on poll manager thread.
2231 *
2232 * We can also try to send more data if we have any since pcb->snd_buf
2233 * was increased and we are now permitted to send more.
2234 */
2235static err_t
2236pxtcp_pcb_sent(void *arg, struct tcp_pcb *pcb, u16_t len)
2237{
2238 struct pxtcp *pxtcp = (struct pxtcp *)arg;
2239 size_t unacked;
2240
2241 LWIP_ASSERT1(pxtcp != NULL);
2242 LWIP_ASSERT1(pxtcp->pcb == pcb);
2243 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
2244 LWIP_UNUSED_ARG(pcb); /* only in assert */
2245
2246 DPRINTF2(("%s: pxtcp %p; pcb %p: +%d ACKed:"
2247 " unacked %d, unsent %d, vacant %d\n",
2248 __func__, (void *)pxtcp, (void *)pcb, (int)len,
2249 (int)pxtcp->inbuf.unacked,
2250 (int)pxtcp->inbuf.unsent,
2251 (int)pxtcp->inbuf.vacant));
2252
2253 if (/* __predict_false */ len == 0) {
2254 /* we are notified to start pulling */
2255 LWIP_ASSERT1(!pxtcp->inbound_close);
2256 LWIP_ASSERT1(pxtcp->inbound_pull);
2257
2258 unacked = pxtcp->inbuf.unacked;
2259 }
2260 else {
2261 /*
2262 * Advance unacked index. Guest acknowledged the data, so it
2263 * won't be needed again for potential retransmits.
2264 */
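        /*
         * E.g. with bufsize 16, unacked 14 and len 5 the sum is 19,
         * which exceeds bufsize, so the index wraps to 3; the bytes
         * [14..16) and [0..3) become free for the producer again.
         */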
2265 unacked = pxtcp->inbuf.unacked + len;
2266 if (unacked > pxtcp->inbuf.bufsize) {
2267 unacked -= pxtcp->inbuf.bufsize;
2268 }
2269 pxtcp->inbuf.unacked = unacked;
2270 }
2271
2272 /* arrange for more inbound data */
2273 if (!pxtcp->inbound_close) {
2274 if (!pxtcp->inbound_pull) {
2275 /* wake up producer, in case it has stopped polling for POLLIN */
2276 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_POLLIN, pxtcp);
2277#ifdef RT_OS_WINDOWS
2278 /**
2279 * We haven't got enough room in the ring buffer to read right now,
2280 * but we don't want to lose the notification from WSAW4ME when
2281 * space becomes available, so we reset the event with an empty recv.
2282 */
2283 recv(pxtcp->sock, NULL, 0, 0);
2284#endif
2285 }
2286 else {
2287 ssize_t nread;
2288 int stop_pollin; /* ignored */
2289
2290 nread = pxtcp_sock_read(pxtcp, &stop_pollin);
2291
2292 if (nread < 0) {
2293 int sockerr = -(int)nread;
2294 LWIP_UNUSED_ARG(sockerr);
2295 DPRINTF0(("%s: sock %d: %R[sockerr]\n",
2296 __func__, pxtcp->sock, sockerr));
2297
2298#if HAVE_TCP_POLLHUP == POLLIN /* see counterpart in pxtcp_pmgr_pump() */
2299 /*
2300 * It may still be registered with poll manager for POLLOUT.
2301 */
2302 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_RESET, pxtcp);
2303 return ERR_OK;
2304#else
2305 /*
2306 * It is no longer registered with poll manager so we
2307 * can kill it directly.
2308 */
2309 pxtcp_pcb_reset_pxtcp(pxtcp);
2310 return ERR_ABRT;
2311#endif
2312 }
2313 }
2314 }
2315
2316 /* forward more data if we can */
2317 if (!pxtcp->inbound_close_done) {
2318 pxtcp_pcb_forward_inbound(pxtcp);
2319
2320 /*
2321 * NB: we might have dissociated from a pcb that transitioned
2322 * to LAST_ACK state, so don't refer to pcb below.
2323 */
2324 }
2325
2326
2327 /* have we got all the acks? */
2328 if (pxtcp->inbound_close /* no more new data */
2329 && pxtcp->inbuf.unsent == pxtcp->inbuf.vacant /* all data is sent */
2330 && unacked == pxtcp->inbuf.unsent) /* ... and is acked */
2331 {
2332 char *buf;
2333
2334 DPRINTF(("%s: pxtcp %p; pcb %p; all data ACKed\n",
2335 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
2336
2337 /* no more retransmits, so buf is not needed */
2338 buf = pxtcp->inbuf.buf;
2339 pxtcp->inbuf.buf = NULL;
2340 free(buf);
2341
2342 /* no more acks, so no more callbacks */
2343 if (pxtcp->pcb != NULL) {
2344 tcp_sent(pxtcp->pcb, NULL);
2345 }
2346
2347 /*
2348 * We may be the last callback for this pcb if we have also
2349 * successfully forwarded inbound_close.
2350 */
2351 pxtcp_pcb_maybe_deferred_delete(pxtcp);
2352 }
2353
2354 return ERR_OK;
2355}
2356
2357
2358/**
2359 * Callback from poll manager (pxtcp::msg_inpull) to switch
2360 * pxtcp_pcb_sent() to actively pull the last bits of input. See
2361 * POLLHUP comment in pxtcp_pmgr_pump().
2362 *
2363 * pxtcp::sock is deregistered from poll manager after this callback
2364 * is scheduled.
2365 */
2366static void
2367pxtcp_pcb_pull_inbound(void *ctx)
2368{
2369 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
2370 LWIP_ASSERT1(pxtcp != NULL);
2371
2372 if (pxtcp->pcb == NULL) {
2373 DPRINTF(("%s: pxtcp %p: PCB IS GONE\n", __func__, (void *)pxtcp));
2374 pxtcp_pcb_reset_pxtcp(pxtcp);
2375 return;
2376 }
2377
2378 pxtcp->inbound_pull = 1;
2379 if (pxtcp->outbound_close_done) {
2380 DPRINTF(("%s: pxtcp %p: pcb %p (deferred delete)\n",
2381 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
2382 pxtcp->deferred_delete = 1;
2383 }
2384 else {
2385 DPRINTF(("%s: pxtcp %p: pcb %p\n",
2386 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
2387 }
2388
2389 pxtcp_pcb_sent(pxtcp, pxtcp->pcb, 0);
2390}
2391
2392
2393/**
2394 * tcp_err() callback.
2395 *
2396 * pcb is not passed to this callback since it may be already
2397 * deallocated by the stack, but we can't do anything useful with it
2398 * anyway since connection is gone.
2399 */
2400static void
2401pxtcp_pcb_err(void *arg, err_t error)
2402{
2403 struct pxtcp *pxtcp = (struct pxtcp *)arg;
2404 LWIP_ASSERT1(pxtcp != NULL);
2405
2406 /*
2407 * ERR_CLSD is special - it is reported here when:
2408 *
2409 * . guest has already half-closed
2410 * . we send FIN to guest when external half-closes
2411 * . guest acks that FIN
2412 *
2413 * Since the connection is now fully closed but its receive side was
2414 * already closed earlier, lwip can only report this via tcp_err.
2415 * At this point the pcb is still alive, so we can peek at it if need be.
2416 *
2417 * The interesting twist is when the ACK from the guest that acks our
2418 * FIN also acks some data. In this scenario lwip will NOT call
2419 * tcp_sent() callback with the ACK for that last bit of data but
2420 * instead will call tcp_err with ERR_CLSD right away. Since that
2421 * ACK also acknowledges all the data, we should run some of
2422 * pxtcp_pcb_sent() logic here.
2423 */
2424 if (error == ERR_CLSD) {
2425 struct tcp_pcb *pcb = pxtcp->pcb; /* still alive */
2426
2427 DPRINTF2(("ERR_CLSD: pxtcp %p; pcb %p:"
2428 " pcb->acked %d;"
2429 " unacked %d, unsent %d, vacant %d\n",
2430 (void *)pxtcp, (void *)pcb,
2431 pcb->acked,
2432 (int)pxtcp->inbuf.unacked,
2433 (int)pxtcp->inbuf.unsent,
2434 (int)pxtcp->inbuf.vacant));
2435
2436 LWIP_ASSERT1(pxtcp->pcb == pcb);
2437 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
2438
2439 if (pcb->acked > 0) {
2440 pxtcp_pcb_sent(pxtcp, pcb, pcb->acked);
2441 }
2442 return;
2443 }
2444
2445 DPRINTF0(("tcp_err: pxtcp=%p, error=%s\n",
2446 (void *)pxtcp, proxy_lwip_strerr(error)));
2447
2448 pxtcp->pcb = NULL; /* pcb is gone */
2449 if (pxtcp->deferred_delete) {
2450 pxtcp_pcb_reset_pxtcp(pxtcp);
2451 }
2452 else {
2453 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_RESET, pxtcp);
2454 }
2455}