VirtualBox

source: vbox/trunk/src/VBox/NetworkServices/NAT/pxtcp.c@51892

Last change on this file since 51892 was 51681, checked in by vboxsync, 10 years ago

NAT/Net: Solaris doesn't report POLLHUP for TCP sockets.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 64.9 KB
1/* -*- indent-tabs-mode: nil; -*- */
2#define LOG_GROUP LOG_GROUP_NAT_SERVICE
3
4#include "winutils.h"
5
6#include "pxtcp.h"
7
8#include "proxy.h"
9#include "proxy_pollmgr.h"
10#include "pxremap.h"
11#include "portfwd.h" /* fwspec */
12
13#ifndef RT_OS_WINDOWS
14#include <sys/types.h>
15#include <sys/socket.h>
16#include <sys/ioctl.h>
17#ifdef RT_OS_SOLARIS
18#include <sys/filio.h> /* FIONREAD is BSD'ism */
19#endif
20#include <stdlib.h>
21#include <stdint.h>
22#include <stdio.h>
23#include <string.h>
24#include <poll.h>
25
26#include <err.h> /* BSD'ism */
27#else
28#include <stdlib.h>
29#include <stdio.h>
30#include <string.h>
31
32#include <iprt/stdint.h>
33#include "winpoll.h"
34#endif
35
36#include "lwip/opt.h"
37
38#include "lwip/sys.h"
39#include "lwip/tcpip.h"
40#include "lwip/netif.h"
41#include "lwip/tcp_impl.h" /* XXX: to access tcp_abandon() */
42#include "lwip/icmp.h"
43#include "lwip/icmp6.h"
44
45/*
46 * Different OSes have different quirks in reporting POLLHUP for TCP
47 * sockets.
48 *
49 * Using shutdown(2) "how" values here would be more readable, but
50 * since SHUT_RD is 0, we can't use 0 for "none", unfortunately.
51 */
52#if defined(RT_OS_NETBSD) || defined(RT_OS_SOLARIS)
53# define HAVE_TCP_POLLHUP 0 /* not reported */
54#elif defined(RT_OS_DARWIN)
55# define HAVE_TCP_POLLHUP POLLIN /* reported when remote closes */
56#else
57# define HAVE_TCP_POLLHUP (POLLIN|POLLOUT) /* reported when both directions are closed */
58#endif
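
/*
 * In other words, HAVE_TCP_POLLHUP encodes which half-closes must have
 * happened before the OS reports POLLHUP.  The conditionally compiled
 * code below keys off this value:
 *
 *   #if !(HAVE_TCP_POLLHUP & POLLOUT) -- POLLHUP alone cannot tell us that
 *       both directions are closed, so the POLLMGR_CHAN_PXTCP_DEL channel
 *       is compiled in to schedule deletion explicitly.
 *
 *   #if HAVE_TCP_POLLHUP == POLLIN -- POLLHUP may arrive while there is
 *       still unread inbound data, so the rest of the input is pulled on
 *       the lwIP thread (see the POLLHUP case in pxtcp_pmgr_pump()).
 */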
59
60
61/**
62 * Ring buffer for inbound data. Filled with data from the host
63 * socket on poll manager thread. Data consumed by scheduling
64 * tcp_write() to the pcb on the lwip thread.
65 *
66 * NB: There is actually a third party present: the lwip stack itself.
67 * Thus the buffer doesn't have the usual two-way free vs. data split,
68 * but rather a three-way split: free / sent-but-unACKed data / unsent data.
69 */
70struct ringbuf {
71 char *buf;
72 size_t bufsize;
73
74 /*
75 * Start of free space, producer writes here (up till "unacked").
76 */
77 volatile size_t vacant;
78
79 /*
80 * Start of sent but unacknowledged data. The data are "owned" by
81 * the stack as it may need to retransmit. This is the free space
82 * limit for producer.
83 */
84 volatile size_t unacked;
85
86 /*
87 * Start of unsent data, consumer reads/sends from here (up till
88 * "vacant"). Not declared volatile since it's only accessed from
89 * the consumer thread.
90 */
91 size_t unsent;
92};
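
/*
 * Illustration (hypothetical numbers): with bufsize = 16, unacked = 4,
 * unsent = 11 and vacant = 14 the regions are
 *
 *   [ 4, 11)  data passed to tcp_write(), waiting for the guest's ACK
 *   [11, 14)  data read from the socket but not yet passed to tcp_write()
 *   [14,  4)  free space (wrapping around); the producer may fill it up
 *             to one slot short of "unacked"
 *
 * pxtcp_sock_read() advances "vacant", pxtcp_pcb_forward_inbound()
 * advances "unsent", and pxtcp_pcb_sent() advances "unacked" as the
 * guest ACKs data, returning space to the producer.
 */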
93
94
95/**
96 * A proxied TCP connection: an lwIP pcb on the guest side paired with a socket on the host side. */
97struct pxtcp {
98 /**
99 * Our poll manager handler. Must be first, strong/weak
100 * references depend on this "inheritance".
101 */
102 struct pollmgr_handler pmhdl;
103
104 /**
105 * lwIP (internal/guest) side of the proxied connection.
106 */
107 struct tcp_pcb *pcb;
108
109 /**
110 * Host (external) side of the proxied connection.
111 */
112 SOCKET sock;
113
114 /**
115 * Socket events we are currently polling for.
116 */
117 int events;
118
119 /**
120 * Socket error. Currently used to save connect(2) errors so that
121 * we can decide if we need to send ICMP error.
122 */
123 int sockerr;
124
125 /**
126 * Interface that we have got the SYN from. Needed to send ICMP
127 * with correct source address.
128 */
129 struct netif *netif;
130
131 /**
132 * For tentatively accepted connections for which we are in the
133 * process of connecting to the real destination, this is the
134 * initial pbuf that we might need to build an ICMP error.
135 *
136 * When the connection is established this is used to hold the outbound
137 * pbuf chain received by pxtcp_pcb_recv() but not yet completely
138 * forwarded over the socket. We cannot "return" it to lwIP since
139 * the head of the chain is already sent and freed.
140 */
141 struct pbuf *unsent;
142
143 /**
144 * Guest has closed its side. Reported to pxtcp_pcb_recv() only
145 * once and we might not be able to forward it immediately if we
146 * have an unsent pbuf.
147 */
148 int outbound_close;
149
150 /**
151 * Outbound half-close has been done on the socket.
152 */
153 int outbound_close_done;
154
155 /**
156 * The external peer has closed its side. We might not be able to forward
157 * it immediately if we have unforwarded data.
158 */
159 int inbound_close;
160
161 /**
162 * Inbound half-close has been done on the pcb.
163 */
164 int inbound_close_done;
165
166 /**
167 * On systems that report POLLHUP as soon as the final FIN is
168 * received on a socket we cannot continue polling for the rest of
169 * input, so we have to read (pull) last data from the socket on
170 * the lwIP thread instead of polling/pushing it from the poll
171 * manager thread. See comment in pxtcp_pmgr_pump() POLLHUP case.
172 */
173 int inbound_pull;
174
175
176 /**
177 * When poll manager schedules delete we may not be able to delete
178 * a pxtcp immediately if not all inbound data has been acked by
179 * the guest: lwIP may need to resend and the data are in pxtcp's
180 * inbuf::buf. We defer delete until all data are acked to
181 * pxtcp_pcb_sent().
182 */
183 int deferred_delete;
184
185 /**
186 * Ring-buffer for inbound data.
187 */
188 struct ringbuf inbuf;
189
190 /**
191 * lwIP thread's strong reference to us.
192 */
193 struct pollmgr_refptr *rp;
194
195
196 /*
197 * We use static messages to call functions on the lwIP thread to
198 * avoid malloc/free overhead.
199 */
200 struct tcpip_msg msg_delete; /* delete pxtcp */
201 struct tcpip_msg msg_reset; /* reset connection and delete pxtcp */
202 struct tcpip_msg msg_accept; /* confirm accept of proxied connection */
203 struct tcpip_msg msg_outbound; /* trigger send of outbound data */
204 struct tcpip_msg msg_inbound; /* trigger send of inbound data */
205 struct tcpip_msg msg_inpull; /* trigger pull of last inbound data */
206};
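
/*
 * Note on the "must be first" requirement on pmhdl above: weak references
 * travel over the poll manager channels as pointers to the embedded
 * pollmgr_handler, and the receiver simply casts back to the containing
 * pxtcp, roughly (see pxtcp_chan_recv_strong() below):
 *
 *   base  = (struct pollmgr_handler *)pollmgr_refptr_get(rp);
 *   pxtcp = (struct pxtcp *)base;    -- valid only because pmhdl is
 *                                       the first member
 */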
207
208
209
210static struct pxtcp *pxtcp_allocate(void);
211static void pxtcp_free(struct pxtcp *);
212
213static void pxtcp_pcb_associate(struct pxtcp *, struct tcp_pcb *);
214static void pxtcp_pcb_dissociate(struct pxtcp *);
215
216/* poll manager callbacks for pxtcp related channels */
217static int pxtcp_pmgr_chan_add(struct pollmgr_handler *, SOCKET, int);
218static int pxtcp_pmgr_chan_pollout(struct pollmgr_handler *, SOCKET, int);
219static int pxtcp_pmgr_chan_pollin(struct pollmgr_handler *, SOCKET, int);
220#if !(HAVE_TCP_POLLHUP & POLLOUT)
221static int pxtcp_pmgr_chan_del(struct pollmgr_handler *, SOCKET, int);
222#endif
223static int pxtcp_pmgr_chan_reset(struct pollmgr_handler *, SOCKET, int);
224
225/* helper functions for sending/receiving pxtcp over poll manager channels */
226static ssize_t pxtcp_chan_send(enum pollmgr_slot_t, struct pxtcp *);
227static ssize_t pxtcp_chan_send_weak(enum pollmgr_slot_t, struct pxtcp *);
228static struct pxtcp *pxtcp_chan_recv(struct pollmgr_handler *, SOCKET, int);
229static struct pxtcp *pxtcp_chan_recv_strong(struct pollmgr_handler *, SOCKET, int);
230
231/* poll manager callbacks for individual sockets */
232static int pxtcp_pmgr_connect(struct pollmgr_handler *, SOCKET, int);
233static int pxtcp_pmgr_pump(struct pollmgr_handler *, SOCKET, int);
234
235/* get incoming traffic into ring buffer */
236static ssize_t pxtcp_sock_read(struct pxtcp *, int *);
237static ssize_t pxtcp_sock_recv(struct pxtcp *, IOVEC *, size_t); /* default */
238
239/* convenience functions for poll manager callbacks */
240static int pxtcp_schedule_delete(struct pxtcp *);
241static int pxtcp_schedule_reset(struct pxtcp *);
242static int pxtcp_schedule_reject(struct pxtcp *);
243
244/* lwip thread callbacks called via proxy_lwip_post() */
245static void pxtcp_pcb_delete_pxtcp(void *);
246static void pxtcp_pcb_reset_pxtcp(void *);
247static void pxtcp_pcb_accept_refuse(void *);
248static void pxtcp_pcb_accept_confirm(void *);
249static void pxtcp_pcb_write_outbound(void *);
250static void pxtcp_pcb_write_inbound(void *);
251static void pxtcp_pcb_pull_inbound(void *);
252
253/* tcp pcb callbacks */
254static err_t pxtcp_pcb_heard(void *, struct tcp_pcb *, err_t); /* global */
255static err_t pxtcp_pcb_accept(void *, struct tcp_pcb *, err_t);
256static err_t pxtcp_pcb_connected(void *, struct tcp_pcb *, err_t);
257static err_t pxtcp_pcb_recv(void *, struct tcp_pcb *, struct pbuf *, err_t);
258static err_t pxtcp_pcb_sent(void *, struct tcp_pcb *, u16_t);
259static err_t pxtcp_pcb_poll(void *, struct tcp_pcb *);
260static void pxtcp_pcb_err(void *, err_t);
261
262static err_t pxtcp_pcb_forward_outbound(struct pxtcp *, struct pbuf *);
263static void pxtcp_pcb_forward_outbound_close(struct pxtcp *);
264
265static ssize_t pxtcp_sock_send(struct pxtcp *, IOVEC *, size_t);
266
267static void pxtcp_pcb_forward_inbound(struct pxtcp *);
268static void pxtcp_pcb_forward_inbound_close(struct pxtcp *);
269DECLINLINE(int) pxtcp_pcb_forward_inbound_done(const struct pxtcp *);
270static void pxtcp_pcb_schedule_poll(struct pxtcp *);
271static void pxtcp_pcb_cancel_poll(struct pxtcp *);
272
273static void pxtcp_pcb_reject(struct netif *, struct tcp_pcb *, struct pbuf *, int);
274DECLINLINE(void) pxtcp_pcb_maybe_deferred_delete(struct pxtcp *);
275
276/* poll manager handlers for pxtcp channels */
277static struct pollmgr_handler pxtcp_pmgr_chan_add_hdl;
278static struct pollmgr_handler pxtcp_pmgr_chan_pollout_hdl;
279static struct pollmgr_handler pxtcp_pmgr_chan_pollin_hdl;
280#if !(HAVE_TCP_POLLHUP & POLLOUT)
281static struct pollmgr_handler pxtcp_pmgr_chan_del_hdl;
282#endif
283static struct pollmgr_handler pxtcp_pmgr_chan_reset_hdl;
284
285
286/**
287 * Init PXTCP - must be run before either the lwIP tcpip thread or the
288 * poll manager threads have been created.
289 */
290void
291pxtcp_init(void)
292{
293 /*
294 * Create channels.
295 */
296#define CHANNEL(SLOT, NAME) do { \
297 NAME##_hdl.callback = NAME; \
298 NAME##_hdl.data = NULL; \
299 NAME##_hdl.slot = -1; \
300 pollmgr_add_chan(SLOT, &NAME##_hdl); \
301 } while (0)
302
303 CHANNEL(POLLMGR_CHAN_PXTCP_ADD, pxtcp_pmgr_chan_add);
304 CHANNEL(POLLMGR_CHAN_PXTCP_POLLIN, pxtcp_pmgr_chan_pollin);
305 CHANNEL(POLLMGR_CHAN_PXTCP_POLLOUT, pxtcp_pmgr_chan_pollout);
306#if !(HAVE_TCP_POLLHUP & POLLOUT)
307 CHANNEL(POLLMGR_CHAN_PXTCP_DEL, pxtcp_pmgr_chan_del);
308#endif
309 CHANNEL(POLLMGR_CHAN_PXTCP_RESET, pxtcp_pmgr_chan_reset);
310
311#undef CHANNEL
312
313 /*
314 * Listen for outgoing connections from guest(s).
315 */
316 tcp_proxy_accept(pxtcp_pcb_heard);
317}
318
319
320/**
321 * Syntactic sugar for sending pxtcp pointer over poll manager
322 * channel. Used by lwip thread functions.
323 */
324static ssize_t
325pxtcp_chan_send(enum pollmgr_slot_t slot, struct pxtcp *pxtcp)
326{
327 return pollmgr_chan_send(slot, &pxtcp, sizeof(pxtcp));
328}
329
330
331/**
332 * Syntactic sugar for sending weak reference to pxtcp over poll
333 * manager channel. Used by lwip thread functions.
334 */
335static ssize_t
336pxtcp_chan_send_weak(enum pollmgr_slot_t slot, struct pxtcp *pxtcp)
337{
338 pollmgr_refptr_weak_ref(pxtcp->rp);
339 return pollmgr_chan_send(slot, &pxtcp->rp, sizeof(pxtcp->rp));
340}
341
342
343/**
344 * Counterpart of pxtcp_chan_send().
345 */
346static struct pxtcp *
347pxtcp_chan_recv(struct pollmgr_handler *handler, SOCKET fd, int revents)
348{
349 struct pxtcp *pxtcp;
350
351 pxtcp = (struct pxtcp *)pollmgr_chan_recv_ptr(handler, fd, revents);
352 return pxtcp;
353}
354
355
356/**
357 * Counterpart of pxtcp_chan_send_weak().
358 */
359static struct pxtcp *
360pxtcp_chan_recv_strong(struct pollmgr_handler *handler, SOCKET fd, int revents)
361{
362 struct pollmgr_refptr *rp;
363 struct pollmgr_handler *base;
364 struct pxtcp *pxtcp;
365
366 rp = (struct pollmgr_refptr *)pollmgr_chan_recv_ptr(handler, fd, revents);
367 base = (struct pollmgr_handler *)pollmgr_refptr_get(rp);
368 pxtcp = (struct pxtcp *)base;
369
370 return pxtcp;
371}
372
373
374/**
375 * Register pxtcp with poll manager.
376 *
377 * Used for POLLMGR_CHAN_PXTCP_ADD and by port-forwarding. Since
378 * error handling is different in these two cases, we leave it up to
379 * the caller.
380 */
381int
382pxtcp_pmgr_add(struct pxtcp *pxtcp)
383{
384 int status;
385
386 LWIP_ASSERT1(pxtcp != NULL);
387 LWIP_ASSERT1(pxtcp->sock >= 0);
388 LWIP_ASSERT1(pxtcp->pmhdl.callback != NULL);
389 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
390 LWIP_ASSERT1(pxtcp->pmhdl.slot < 0);
391
392 status = pollmgr_add(&pxtcp->pmhdl, pxtcp->sock, pxtcp->events);
393 return status;
394}
395
396
397/**
398 * Unregister pxtcp with poll manager.
399 *
400 * Used for POLLMGR_CHAN_PXTCP_RESET and by port-forwarding (on error
401 * leg).
402 */
403void
404pxtcp_pmgr_del(struct pxtcp *pxtcp)
405{
406 LWIP_ASSERT1(pxtcp != NULL);
407
408 pollmgr_del_slot(pxtcp->pmhdl.slot);
409}
410
411
412/**
413 * POLLMGR_CHAN_PXTCP_ADD handler.
414 *
415 * Get new pxtcp from lwip thread and start polling its socket.
416 */
417static int
418pxtcp_pmgr_chan_add(struct pollmgr_handler *handler, SOCKET fd, int revents)
419{
420 struct pxtcp *pxtcp;
421 int status;
422
423 pxtcp = pxtcp_chan_recv(handler, fd, revents);
424 DPRINTF0(("pxtcp_add: new pxtcp %p; pcb %p; sock %d\n",
425 (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
426
427 status = pxtcp_pmgr_add(pxtcp);
428 if (status < 0) {
429 (void) pxtcp_schedule_reset(pxtcp);
430 }
431
432 return POLLIN;
433}
434
435
436/**
437 * POLLMGR_CHAN_PXTCP_POLLOUT handler.
438 *
439 * pxtcp_pcb_forward_outbound() on the lwIP thread tried to send data
440 * and failed; it now requests us to poll the socket for POLLOUT and to
441 * schedule pxtcp_pcb_forward_outbound() again when the socket is writable.
442 */
443static int
444pxtcp_pmgr_chan_pollout(struct pollmgr_handler *handler, SOCKET fd, int revents)
445{
446 struct pxtcp *pxtcp;
447
448 pxtcp = pxtcp_chan_recv_strong(handler, fd, revents);
449 DPRINTF0(("pxtcp_pollout: pxtcp %p\n", (void *)pxtcp));
450
451 if (pxtcp == NULL) {
452 return POLLIN;
453 }
454
455 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
456 LWIP_ASSERT1(pxtcp->pmhdl.slot > 0);
457
458 pxtcp->events |= POLLOUT;
459 pollmgr_update_events(pxtcp->pmhdl.slot, pxtcp->events);
460
461 return POLLIN;
462}
463
464
465/**
466 * POLLMGR_CHAN_PXTCP_POLLIN handler.
467 */
468static int
469pxtcp_pmgr_chan_pollin(struct pollmgr_handler *handler, SOCKET fd, int revents)
470{
471 struct pxtcp *pxtcp;
472
473 pxtcp = pxtcp_chan_recv_strong(handler, fd, revents);
474 DPRINTF2(("pxtcp_pollin: pxtcp %p\n", (void *)pxtcp));
475
476 if (pxtcp == NULL) {
477 return POLLIN;
478 }
479
480 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
481 LWIP_ASSERT1(pxtcp->pmhdl.slot > 0);
482
483 if (pxtcp->inbound_close) {
484 return POLLIN;
485 }
486
487 pxtcp->events |= POLLIN;
488 pollmgr_update_events(pxtcp->pmhdl.slot, pxtcp->events);
489
490 return POLLIN;
491}
492
493
494#if !(HAVE_TCP_POLLHUP & POLLOUT)
495/**
496 * POLLMGR_CHAN_PXTCP_DEL handler.
497 *
498 * Schedule pxtcp deletion. We only need this if the host system doesn't
499 * report POLLHUP for fully closed tcp sockets.
500 */
501static int
502pxtcp_pmgr_chan_del(struct pollmgr_handler *handler, SOCKET fd, int revents)
503{
504 struct pxtcp *pxtcp;
505
506 pxtcp = pxtcp_chan_recv_strong(handler, fd, revents);
507 if (pxtcp == NULL) {
508 return POLLIN;
509 }
510
511 DPRINTF(("PXTCP_DEL: pxtcp %p; pcb %p; sock %d\n",
512 (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
513
514 LWIP_ASSERT1(pxtcp->pmhdl.callback != NULL);
515 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
516
517 LWIP_ASSERT1(pxtcp->inbound_close); /* EOF read */
518 LWIP_ASSERT1(pxtcp->outbound_close_done); /* EOF sent */
519
520 pxtcp_pmgr_del(pxtcp);
521 (void) pxtcp_schedule_delete(pxtcp);
522
523 return POLLIN;
524}
525#endif /* !(HAVE_TCP_POLLHUP & POLLOUT) */
526
527
528/**
529 * POLLMGR_CHAN_PXTCP_RESET handler.
530 *
531 * Close the socket with RST and delete pxtcp.
532 */
533static int
534pxtcp_pmgr_chan_reset(struct pollmgr_handler *handler, SOCKET fd, int revents)
535{
536 struct pxtcp *pxtcp;
537
538 pxtcp = pxtcp_chan_recv_strong(handler, fd, revents);
539 if (pxtcp == NULL) {
540 return POLLIN;
541 }
542
543 DPRINTF0(("PXTCP_RESET: pxtcp %p; pcb %p; sock %d\n",
544 (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
545
546 LWIP_ASSERT1(pxtcp->pmhdl.callback != NULL);
547 LWIP_ASSERT1(pxtcp->pmhdl.data == (void *)pxtcp);
548
549 pxtcp_pmgr_del(pxtcp);
550
551 proxy_reset_socket(pxtcp->sock);
552 pxtcp->sock = INVALID_SOCKET;
553
554 (void) pxtcp_schedule_reset(pxtcp);
555
556 return POLLIN;
557}
558
559
560static struct pxtcp *
561pxtcp_allocate(void)
562{
563 struct pxtcp *pxtcp;
564
565 pxtcp = (struct pxtcp *)malloc(sizeof(*pxtcp));
566 if (pxtcp == NULL) {
567 return NULL;
568 }
569
570 pxtcp->pmhdl.callback = NULL;
571 pxtcp->pmhdl.data = (void *)pxtcp;
572 pxtcp->pmhdl.slot = -1;
573
574 pxtcp->pcb = NULL;
575 pxtcp->sock = INVALID_SOCKET;
576 pxtcp->events = 0;
577 pxtcp->sockerr = 0;
578 pxtcp->netif = NULL;
579 pxtcp->unsent = NULL;
580 pxtcp->outbound_close = 0;
581 pxtcp->outbound_close_done = 0;
582 pxtcp->inbound_close = 0;
583 pxtcp->inbound_close_done = 0;
584 pxtcp->inbound_pull = 0;
585 pxtcp->deferred_delete = 0;
586
587 pxtcp->inbuf.bufsize = 64 * 1024;
588 pxtcp->inbuf.buf = (char *)malloc(pxtcp->inbuf.bufsize);
589 if (pxtcp->inbuf.buf == NULL) {
590 free(pxtcp);
591 return NULL;
592 }
593 pxtcp->inbuf.vacant = 0;
594 pxtcp->inbuf.unacked = 0;
595 pxtcp->inbuf.unsent = 0;
596
597 pxtcp->rp = pollmgr_refptr_create(&pxtcp->pmhdl);
598 if (pxtcp->rp == NULL) {
599 free(pxtcp->inbuf.buf);
600 free(pxtcp);
601 return NULL;
602 }
603
604#define CALLBACK_MSG(MSG, FUNC) \
605 do { \
606 pxtcp->MSG.type = TCPIP_MSG_CALLBACK_STATIC; \
607 pxtcp->MSG.sem = NULL; \
608 pxtcp->MSG.msg.cb.function = FUNC; \
609 pxtcp->MSG.msg.cb.ctx = (void *)pxtcp; \
610 } while (0)
611
612 CALLBACK_MSG(msg_delete, pxtcp_pcb_delete_pxtcp);
613 CALLBACK_MSG(msg_reset, pxtcp_pcb_reset_pxtcp);
614 CALLBACK_MSG(msg_accept, pxtcp_pcb_accept_confirm);
615 CALLBACK_MSG(msg_outbound, pxtcp_pcb_write_outbound);
616 CALLBACK_MSG(msg_inbound, pxtcp_pcb_write_inbound);
617 CALLBACK_MSG(msg_inpull, pxtcp_pcb_pull_inbound);
618
619#undef CALLBACK_MSG
620
621 return pxtcp;
622}
623
624
625/**
626 * Exported to fwtcp to create pxtcp for incoming port-forwarded
627 * connections. Completed with pcb in pxtcp_pcb_connect().
628 */
629struct pxtcp *
630pxtcp_create_forwarded(SOCKET sock)
631{
632 struct pxtcp *pxtcp;
633
634 pxtcp = pxtcp_allocate();
635 if (pxtcp == NULL) {
636 return NULL;
637 }
638
639 pxtcp->sock = sock;
640 pxtcp->pmhdl.callback = pxtcp_pmgr_pump;
641 pxtcp->events = 0;
642
643 return pxtcp;
644}
645
646
647static void
648pxtcp_pcb_associate(struct pxtcp *pxtcp, struct tcp_pcb *pcb)
649{
650 LWIP_ASSERT1(pxtcp != NULL);
651 LWIP_ASSERT1(pcb != NULL);
652
653 pxtcp->pcb = pcb;
654
655 tcp_arg(pcb, pxtcp);
656
657 tcp_recv(pcb, pxtcp_pcb_recv);
658 tcp_sent(pcb, pxtcp_pcb_sent);
659 tcp_poll(pcb, NULL, 255);
660 tcp_err(pcb, pxtcp_pcb_err);
661}
662
663
664static void
665pxtcp_free(struct pxtcp *pxtcp)
666{
667 if (pxtcp->unsent != NULL) {
668 pbuf_free(pxtcp->unsent);
669 }
670 if (pxtcp->inbuf.buf != NULL) {
671 free(pxtcp->inbuf.buf);
672 }
673 free(pxtcp);
674}
675
676
677/**
678 * Counterpart to pxtcp_create_forwarded(): destroys a pxtcp that fwtcp
679 * failed to register with the poll manager and thus could not post to
680 * the lwip thread to perform the connect.
681 */
682void
683pxtcp_cancel_forwarded(struct pxtcp *pxtcp)
684{
685 LWIP_ASSERT1(pxtcp->pcb == NULL);
686 pxtcp_pcb_reset_pxtcp(pxtcp);
687}
688
689
690static void
691pxtcp_pcb_dissociate(struct pxtcp *pxtcp)
692{
693 if (pxtcp == NULL || pxtcp->pcb == NULL) {
694 return;
695 }
696
697 DPRINTF(("%s: pxtcp %p <-> pcb %p\n",
698 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
699
700 /*
701 * We must have dissociated from a fully closed pcb immediately
702 * since lwip recycles them and we don't want to mess with what
703 * would be someone else's pcb that we happen to have a stale
704 * pointer to.
705 */
706 LWIP_ASSERT1(pxtcp->pcb->callback_arg == pxtcp);
707
708 tcp_recv(pxtcp->pcb, NULL);
709 tcp_sent(pxtcp->pcb, NULL);
710 tcp_poll(pxtcp->pcb, NULL, 255);
711 tcp_err(pxtcp->pcb, NULL);
712 tcp_arg(pxtcp->pcb, NULL);
713 pxtcp->pcb = NULL;
714}
715
716
717/**
718 * Lwip thread callback invoked via pxtcp::msg_delete
719 *
720 * Since we use static messages to communicate to the lwip thread, we
721 * cannot delete pxtcp without making sure there are no unprocessed
722 * messages in the lwip thread mailbox.
723 *
724 * The easiest way to ensure that is to send this "delete" message as
725 * the last one and when it's processed we know there are no more and
726 * it's safe to delete pxtcp.
727 *
728 * Poll manager handlers should use pxtcp_schedule_delete()
729 * convenience function.
730 */
731static void
732pxtcp_pcb_delete_pxtcp(void *ctx)
733{
734 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
735
736 DPRINTF(("%s: pxtcp %p, pcb %p, sock %d%s\n",
737 __func__, (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock,
738 (pxtcp->deferred_delete && !pxtcp->inbound_pull
739 ? " (was deferred)" : "")));
740
741 LWIP_ASSERT1(pxtcp != NULL);
742 LWIP_ASSERT1(pxtcp->pmhdl.slot < 0);
743 LWIP_ASSERT1(pxtcp->outbound_close_done);
744 LWIP_ASSERT1(pxtcp->inbound_close); /* not necessarily done */
745
746
747 /*
748 * pxtcp is no longer registered with poll manager, so it's safe
749 * to close the socket.
750 */
751 if (pxtcp->sock != INVALID_SOCKET) {
752 closesocket(pxtcp->sock);
753 pxtcp->sock = INVALID_SOCKET;
754 }
755
756 /*
757 * We might have already dissociated from a fully closed pcb, or
758 * guest might have sent us a reset while msg_delete was in
759 * transit. If there's no pcb, we are done.
760 */
761 if (pxtcp->pcb == NULL) {
762 pollmgr_refptr_unref(pxtcp->rp);
763 pxtcp_free(pxtcp);
764 return;
765 }
766
767 /*
768 * Have we completely forwarded all inbound traffic to the guest?
769 *
770 * We may still be waiting for ACKs. We may have failed to send
771 * some of the data (tcp_write() failed with ERR_MEM). We may
772 * have failed to send the FIN (tcp_shutdown() failed with
773 * ERR_MEM).
774 */
775 if (pxtcp_pcb_forward_inbound_done(pxtcp)) {
776 pxtcp_pcb_dissociate(pxtcp);
777 pollmgr_refptr_unref(pxtcp->rp);
778 pxtcp_free(pxtcp);
779 }
780 else {
781 DPRINTF2(("delete: pxtcp %p; pcb %p:"
782 " unacked %d, unsent %d, vacant %d, %s - DEFER!\n",
783 (void *)pxtcp, (void *)pxtcp->pcb,
784 (int)pxtcp->inbuf.unacked,
785 (int)pxtcp->inbuf.unsent,
786 (int)pxtcp->inbuf.vacant,
787 pxtcp->inbound_close_done ? "FIN sent" : "FIN is NOT sent"));
788
789 LWIP_ASSERT1(!pxtcp->deferred_delete);
790 pxtcp->deferred_delete = 1;
791 }
792}
793
794
795/**
796 * If we couldn't delete pxtcp right away in the msg_delete callback
797 * from the poll manager thread, we repeat the check at the end of
798 * relevant pcb callbacks.
799 */
800DECLINLINE(void)
801pxtcp_pcb_maybe_deferred_delete(struct pxtcp *pxtcp)
802{
803 if (pxtcp->deferred_delete && pxtcp_pcb_forward_inbound_done(pxtcp)) {
804 pxtcp_pcb_delete_pxtcp(pxtcp);
805 }
806}
807
808
809/**
810 * Poll manager callbacks should use this convenience wrapper to
811 * schedule pxtcp deletion on the lwip thread and to deregister from
812 * the poll manager.
813 */
814static int
815pxtcp_schedule_delete(struct pxtcp *pxtcp)
816{
817 /*
818 * If pollmgr_refptr_get() is called by any channel before
819 * scheduled deletion happens, let them know we are gone.
820 */
821 pxtcp->pmhdl.slot = -1;
822
823 /*
824 * Schedule deletion. Since poll manager thread may be pre-empted
825 * right after we send the message, the deletion may actually
826 * happen on the lwip thread before we return from this function,
827 * so it's not safe to refer to pxtcp after this call.
828 */
829 proxy_lwip_post(&pxtcp->msg_delete);
830
831 /* tell poll manager to deregister us */
832 return -1;
833}
834
835
836/**
837 * Lwip thread callback invoked via pxtcp::msg_reset
838 *
839 * Like pxtcp_pcb_delete_pxtcp(), but sends RST to the guest before
840 * deleting this pxtcp.
841 */
842static void
843pxtcp_pcb_reset_pxtcp(void *ctx)
844{
845 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
846 LWIP_ASSERT1(pxtcp != NULL);
847
848 DPRINTF0(("%s: pxtcp %p, pcb %p, sock %d\n",
849 __func__, (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
850
851 if (pxtcp->sock != INVALID_SOCKET) {
852 proxy_reset_socket(pxtcp->sock);
853 pxtcp->sock = INVALID_SOCKET;
854 }
855
856 if (pxtcp->pcb != NULL) {
857 struct tcp_pcb *pcb = pxtcp->pcb;
858 pxtcp_pcb_dissociate(pxtcp);
859 tcp_abort(pcb);
860 }
861
862 pollmgr_refptr_unref(pxtcp->rp);
863 pxtcp_free(pxtcp);
864}
865
866
867
868/**
869 * Poll manager callbacks should use this convenience wrapper to
870 * schedule pxtcp reset and deletion on the lwip thread and to
871 * deregister from the poll manager.
872 *
873 * See pxtcp_schedule_delete() for additional comments.
874 */
875static int
876pxtcp_schedule_reset(struct pxtcp *pxtcp)
877{
878 pxtcp->pmhdl.slot = -1;
879 proxy_lwip_post(&pxtcp->msg_reset);
880 return -1;
881}
882
883
884/**
885 * Reject proxy connection attempt. Depending on the cause (sockerr)
886 * we may just drop the pcb silently, generate an ICMP datagram or
887 * send TCP reset.
888 */
889static void
890pxtcp_pcb_reject(struct netif *netif, struct tcp_pcb *pcb,
891 struct pbuf *p, int sockerr)
892{
893 struct netif *oif;
894 int reset = 0;
895
896 oif = ip_current_netif();
897 ip_current_netif() = netif;
898
899 if (sockerr == ECONNREFUSED) {
900 reset = 1;
901 }
902 else if (PCB_ISIPV6(pcb)) {
903 if (sockerr == EHOSTDOWN) {
904 icmp6_dest_unreach(p, ICMP6_DUR_ADDRESS); /* XXX: ??? */
905 }
906 else if (sockerr == EHOSTUNREACH
907 || sockerr == ENETDOWN
908 || sockerr == ENETUNREACH)
909 {
910 icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE);
911 }
912 }
913 else {
914 if (sockerr == EHOSTDOWN
915 || sockerr == EHOSTUNREACH
916 || sockerr == ENETDOWN
917 || sockerr == ENETUNREACH)
918 {
919 icmp_dest_unreach(p, ICMP_DUR_HOST);
920 }
921 }
922
923 ip_current_netif() = oif;
924
925 tcp_abandon(pcb, reset);
926}
927
928
929/**
930 * Called from poll manager thread via pxtcp::msg_accept when proxy
931 * failed to connect to the destination. Also called when we failed
932 * to register pxtcp with poll manager.
933 *
934 * This is like pxtcp_pcb_reset_pxtcp() but is more discriminate in
935 * how this unestablished connection is terminated.
936 */
937static void
938pxtcp_pcb_accept_refuse(void *ctx)
939{
940 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
941
942 DPRINTF0(("%s: pxtcp %p, pcb %p, sock %d: %R[sockerr]\n",
943 __func__, (void *)pxtcp, (void *)pxtcp->pcb,
944 pxtcp->sock, pxtcp->sockerr));
945
946 LWIP_ASSERT1(pxtcp != NULL);
947 LWIP_ASSERT1(pxtcp->sock == INVALID_SOCKET);
948
949 if (pxtcp->pcb != NULL) {
950 struct tcp_pcb *pcb = pxtcp->pcb;
951 pxtcp_pcb_dissociate(pxtcp);
952 pxtcp_pcb_reject(pxtcp->netif, pcb, pxtcp->unsent, pxtcp->sockerr);
953 }
954
955 pollmgr_refptr_unref(pxtcp->rp);
956 pxtcp_free(pxtcp);
957}
958
959
960/**
961 * Convenience wrapper for poll manager connect callback to reject
962 * connection attempt.
963 *
964 * Like pxtcp_schedule_reset(), but the callback is more discriminate
965 * in how this unestablished connection is terminated.
966 */
967static int
968pxtcp_schedule_reject(struct pxtcp *pxtcp)
969{
970 pxtcp->msg_accept.msg.cb.function = pxtcp_pcb_accept_refuse;
971 pxtcp->pmhdl.slot = -1;
972 proxy_lwip_post(&pxtcp->msg_accept);
973 return -1;
974}
975
976
977/**
978 * Global tcp_proxy_accept() callback for proxied outgoing TCP
979 * connections from guest(s).
980 */
981static err_t
982pxtcp_pcb_heard(void *arg, struct tcp_pcb *newpcb, err_t error)
983{
984 struct pbuf *p = (struct pbuf *)arg;
985 struct pxtcp *pxtcp;
986 ipX_addr_t dst_addr;
987 int sdom;
988 SOCKET sock;
989 ssize_t nsent;
990 int sockerr = 0;
991
992 LWIP_UNUSED_ARG(error); /* always ERR_OK */
993
994 /*
995 * TCP first calls the accept callback when it receives the first SYN
996 * and "tentatively accepts" the new proxied connection attempt. When
997 * the proxy "confirms" the SYN and sends SYN|ACK and the guest
998 * replies with ACK, the accept callback is called again, this time
999 * with the established connection.
1000 */
1001 LWIP_ASSERT1(newpcb->state == SYN_RCVD_0);
1002 tcp_accept(newpcb, pxtcp_pcb_accept);
1003 tcp_arg(newpcb, NULL);
1004
1005 tcp_setprio(newpcb, TCP_PRIO_MAX);
1006
1007 pxremap_outbound_ipX(PCB_ISIPV6(newpcb), &dst_addr, &newpcb->local_ip);
1008
1009 sdom = PCB_ISIPV6(newpcb) ? PF_INET6 : PF_INET;
1010 sock = proxy_connected_socket(sdom, SOCK_STREAM,
1011 &dst_addr, newpcb->local_port);
1012 if (sock == INVALID_SOCKET) {
1013 sockerr = SOCKERRNO();
1014 goto abort;
1015 }
1016
1017 pxtcp = pxtcp_allocate();
1018 if (pxtcp == NULL) {
1019 proxy_reset_socket(sock);
1020 goto abort;
1021 }
1022
1023 /* save initial datagram in case we need to reply with ICMP */
1024 pbuf_ref(p);
1025 pxtcp->unsent = p;
1026 pxtcp->netif = ip_current_netif();
1027
1028 pxtcp_pcb_associate(pxtcp, newpcb);
1029 pxtcp->sock = sock;
1030
1031 pxtcp->pmhdl.callback = pxtcp_pmgr_connect;
1032 pxtcp->events = POLLOUT;
1033
1034 nsent = pxtcp_chan_send(POLLMGR_CHAN_PXTCP_ADD, pxtcp);
1035 if (nsent < 0) {
1036 pxtcp->sock = INVALID_SOCKET;
1037 proxy_reset_socket(sock);
1038 pxtcp_pcb_accept_refuse(pxtcp);
1039 return ERR_ABRT;
1040 }
1041
1042 return ERR_OK;
1043
1044 abort:
1045 DPRINTF0(("%s: pcb %p, sock %d: %R[sockerr]\n",
1046 __func__, (void *)newpcb, sock, sockerr));
1047 pxtcp_pcb_reject(ip_current_netif(), newpcb, p, sockerr);
1048 return ERR_ABRT;
1049}
1050
1051
1052/**
1053 * tcp_proxy_accept() callback for accepted proxied outgoing TCP
1054 * connections from guest(s). This is the "real" accept, with the
1055 * three-way handshake completed.
1056 */
1057static err_t
1058pxtcp_pcb_accept(void *arg, struct tcp_pcb *pcb, err_t error)
1059{
1060 struct pxtcp *pxtcp = (struct pxtcp *)arg;
1061
1062 LWIP_UNUSED_ARG(pcb); /* used only in asserts */
1063 LWIP_UNUSED_ARG(error); /* always ERR_OK */
1064
1065 LWIP_ASSERT1(pxtcp != NULL);
1066 LWIP_ASSERT1(pxtcp->pcb == pcb);
1067 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
1068
1069 /* send any inbound data that are already queued */
1070 pxtcp_pcb_forward_inbound(pxtcp);
1071 return ERR_OK;
1072}
1073
1074
1075/**
1076 * Initial poll manager callback for proxied outgoing TCP connections.
1077 * pxtcp_pcb_heard() sets pxtcp::pmhdl::callback to this.
1078 *
1079 * Waits for connect(2) to the destination to complete. On success
1080 * replaces itself with pxtcp_pmgr_pump() callback common to all
1081 * established TCP connections.
1082 */
1083static int
1084pxtcp_pmgr_connect(struct pollmgr_handler *handler, SOCKET fd, int revents)
1085{
1086 struct pxtcp *pxtcp;
1087
1088 pxtcp = (struct pxtcp *)handler->data;
1089 LWIP_ASSERT1(handler == &pxtcp->pmhdl);
1090 LWIP_ASSERT1(fd == pxtcp->sock);
1091
1092 if (revents & (POLLNVAL | POLLHUP | POLLERR)) {
1093 if (revents & POLLNVAL) {
1094 pxtcp->sock = INVALID_SOCKET;
1095 pxtcp->sockerr = ETIMEDOUT;
1096 }
1097 else {
1098 socklen_t optlen = (socklen_t)sizeof(pxtcp->sockerr);
1099 int status;
1100 SOCKET s;
1101
1102 status = getsockopt(pxtcp->sock, SOL_SOCKET, SO_ERROR,
1103 (char *)&pxtcp->sockerr, &optlen);
1104 if (status < 0) { /* should not happen */
1105 DPRINTF(("%s: sock %d: SO_ERROR failed: %R[sockerr]\n",
1106 __func__, fd, SOCKERRNO()));
1107 }
1108 else {
1109 DPRINTF(("%s: sock %d: connect: %R[sockerr]\n",
1110 __func__, fd, pxtcp->sockerr));
1111 }
1112 s = pxtcp->sock;
1113 pxtcp->sock = INVALID_SOCKET;
1114 closesocket(s);
1115 }
1116 return pxtcp_schedule_reject(pxtcp);
1117 }
1118
1119 if (revents & POLLOUT) { /* connect is successful */
1120 /* confirm accept to the guest */
1121 proxy_lwip_post(&pxtcp->msg_accept);
1122
1123 /*
1124 * Switch to common callback used for all established proxied
1125 * connections.
1126 */
1127 pxtcp->pmhdl.callback = pxtcp_pmgr_pump;
1128
1129 /*
1130 * Initially we poll for incoming traffic only. Outgoing
1131 * traffic is fast-forwarded by pxtcp_pcb_recv(); if it fails
1132 * it will ask us to poll for POLLOUT too.
1133 */
1134 pxtcp->events = POLLIN;
1135 return pxtcp->events;
1136 }
1137
1138 /* should never get here */
1139 DPRINTF0(("%s: pxtcp %p, sock %d: unexpected revents 0x%x\n",
1140 __func__, (void *)pxtcp, fd, revents));
1141 return pxtcp_schedule_reset(pxtcp);
1142}
1143
1144
1145/**
1146 * Called from poll manager thread via pxtcp::msg_accept when proxy
1147 * connected to the destination. Finalize accept by sending SYN|ACK
1148 * to the guest.
1149 */
1150static void
1151pxtcp_pcb_accept_confirm(void *ctx)
1152{
1153 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
1154 err_t error;
1155
1156 LWIP_ASSERT1(pxtcp != NULL);
1157 if (pxtcp->pcb == NULL) {
1158 return;
1159 }
1160
1161 /* we are not going to reply with ICMP, so we can drop initial pbuf */
1162 LWIP_ASSERT1(pxtcp->unsent != NULL);
1163 pbuf_free(pxtcp->unsent);
1164 pxtcp->unsent = NULL;
1165
1166 error = tcp_proxy_accept_confirm(pxtcp->pcb);
1167
1168 /*
1169 * If lwIP failed to enqueue SYN|ACK because it's out of pbufs it
1170 * abandons the pcb. Retrying that is not very easy, since it
1171 * would require keeping "fractional state". From guest's point
1172 * of view there is no reply to its SYN so it will either resend
1173 * the SYN (effectively triggering full connection retry for us),
1174 * or it will eventually time out.
1175 */
1176 if (error == ERR_ABRT) {
1177 pxtcp->pcb = NULL; /* pcb is gone */
1178 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_RESET, pxtcp);
1179 }
1180
1181 /*
1182 * else if (error != ERR_OK): even if tcp_output() failed with
1183 * ERR_MEM - don't give up, that SYN|ACK is enqueued and will be
1184 * retransmitted eventually.
1185 */
1186}
1187
1188
1189/**
1190 * Entry point for port-forwarding.
1191 *
1192 * fwtcp accepts a new incoming connection, creates a pxtcp for the socket
1193 * (with no pcb yet) and adds it to the poll manager (polling for
1194 * errors only). Then it calls this function to construct the pcb and
1195 * perform the connection to the guest.
1196 */
1197void
1198pxtcp_pcb_connect(struct pxtcp *pxtcp, const struct fwspec *fwspec)
1199{
1200 struct sockaddr_storage ss;
1201 socklen_t sslen;
1202 struct tcp_pcb *pcb;
1203 ipX_addr_t src_addr, dst_addr;
1204 u16_t src_port, dst_port;
1205 int status;
1206 err_t error;
1207
1208 LWIP_ASSERT1(pxtcp != NULL);
1209 LWIP_ASSERT1(pxtcp->pcb == NULL);
1210 LWIP_ASSERT1(fwspec->stype == SOCK_STREAM);
1211
1212 pcb = tcp_new();
1213 if (pcb == NULL) {
1214 goto reset;
1215 }
1216
1217 tcp_setprio(pcb, TCP_PRIO_MAX);
1218 pxtcp_pcb_associate(pxtcp, pcb);
1219
1220 sslen = sizeof(ss);
1221 status = getpeername(pxtcp->sock, (struct sockaddr *)&ss, &sslen);
1222 if (status == SOCKET_ERROR) {
1223 goto reset;
1224 }
1225
1226 /* nit: compares PF and AF, but they are the same everywhere */
1227 LWIP_ASSERT1(ss.ss_family == fwspec->sdom);
1228
1229 status = fwany_ipX_addr_set_src(&src_addr, (const struct sockaddr *)&ss);
1230 if (status == PXREMAP_FAILED) {
1231 goto reset;
1232 }
1233
1234 if (ss.ss_family == PF_INET) {
1235 const struct sockaddr_in *peer4 = (const struct sockaddr_in *)&ss;
1236
1237 src_port = peer4->sin_port;
1238
1239 memcpy(&dst_addr.ip4, &fwspec->dst.sin.sin_addr, sizeof(ip_addr_t));
1240 dst_port = fwspec->dst.sin.sin_port;
1241 }
1242 else { /* PF_INET6 */
1243 const struct sockaddr_in6 *peer6 = (const struct sockaddr_in6 *)&ss;
1244 ip_set_v6(pcb, 1);
1245
1246 src_port = peer6->sin6_port;
1247
1248 memcpy(&dst_addr.ip6, &fwspec->dst.sin6.sin6_addr, sizeof(ip6_addr_t));
1249 dst_port = fwspec->dst.sin6.sin6_port;
1250 }
1251
1252 /* lwip port arguments are in host order */
1253 src_port = ntohs(src_port);
1254 dst_port = ntohs(dst_port);
1255
1256 error = tcp_proxy_bind(pcb, ipX_2_ip(&src_addr), src_port);
1257 if (error != ERR_OK) {
1258 goto reset;
1259 }
1260
1261 error = tcp_connect(pcb, ipX_2_ip(&dst_addr), dst_port,
1262 /* callback: */ pxtcp_pcb_connected);
1263 if (error != ERR_OK) {
1264 goto reset;
1265 }
1266
1267 return;
1268
1269 reset:
1270 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_RESET, pxtcp);
1271}
1272
1273
1274/**
1275 * Port-forwarded connection to guest is successful, pump data.
1276 */
1277static err_t
1278pxtcp_pcb_connected(void *arg, struct tcp_pcb *pcb, err_t error)
1279{
1280 struct pxtcp *pxtcp = (struct pxtcp *)arg;
1281
1282 LWIP_ASSERT1(error == ERR_OK); /* always called with ERR_OK */
1283 LWIP_UNUSED_ARG(error);
1284
1285 LWIP_ASSERT1(pxtcp != NULL);
1286 LWIP_ASSERT1(pxtcp->pcb == pcb);
1287 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
1288 LWIP_UNUSED_ARG(pcb);
1289
1290 DPRINTF0(("%s: new pxtcp %p; pcb %p; sock %d\n",
1291 __func__, (void *)pxtcp, (void *)pxtcp->pcb, pxtcp->sock));
1292
1293 /* ACK on connection is like ACK on data in pxtcp_pcb_sent() */
1294 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_POLLIN, pxtcp);
1295
1296 return ERR_OK;
1297}
1298
1299
1300/**
1301 * tcp_recv() callback.
1302 */
1303static err_t
1304pxtcp_pcb_recv(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t error)
1305{
1306 struct pxtcp *pxtcp = (struct pxtcp *)arg;
1307
1308 LWIP_ASSERT1(error == ERR_OK); /* always called with ERR_OK */
1309 LWIP_UNUSED_ARG(error);
1310
1311 LWIP_ASSERT1(pxtcp != NULL);
1312 LWIP_ASSERT1(pxtcp->pcb == pcb);
1313 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
1314 LWIP_UNUSED_ARG(pcb);
1315
1316
1317 /*
1318 * Have we done sending previous batch?
1319 */
1320 if (pxtcp->unsent != NULL) {
1321 if (p != NULL) {
1322 /*
1323 * Return an error to tell TCP to hold onto that pbuf.
1324 * It will be presented to us later from tcp_fasttmr().
1325 */
1326 return ERR_WOULDBLOCK;
1327 }
1328 else {
1329 /*
1330 * Unlike data, the p == NULL that indicates orderly shutdown is
1331 * NOT presented to us again.
1332 */
1333 pxtcp->outbound_close = 1;
1334 return ERR_OK;
1335 }
1336 }
1337
1338
1339 /*
1340 * Guest closed?
1341 */
1342 if (p == NULL) {
1343 pxtcp->outbound_close = 1;
1344 pxtcp_pcb_forward_outbound_close(pxtcp);
1345 return ERR_OK;
1346 }
1347
1348
1349 /*
1350 * Got data, send what we can without blocking.
1351 */
1352 return pxtcp_pcb_forward_outbound(pxtcp, p);
1353}
1354
1355
1356/**
1357 * Guest half-closed its TX side of the connection.
1358 *
1359 * Called either immediately from pxtcp_pcb_recv() when it gets NULL,
1360 * or from pxtcp_pcb_forward_outbound() when it finishes forwarding
1361 * previously unsent data and sees pxtcp::outbound_close flag saved by
1362 * pxtcp_pcb_recv().
1363 */
1364static void
1365pxtcp_pcb_forward_outbound_close(struct pxtcp *pxtcp)
1366{
1367 struct tcp_pcb *pcb;
1368
1369 LWIP_ASSERT1(pxtcp != NULL);
1370 LWIP_ASSERT1(pxtcp->outbound_close);
1371 LWIP_ASSERT1(!pxtcp->outbound_close_done);
1372
1373 pcb = pxtcp->pcb;
1374 LWIP_ASSERT1(pcb != NULL);
1375
1376 DPRINTF(("outbound_close: pxtcp %p; pcb %p %s\n",
1377 (void *)pxtcp, (void *)pcb, tcp_debug_state_str(pcb->state)));
1378
1379
1380 /* set the flag first, since shutdown() may trigger POLLHUP */
1381 pxtcp->outbound_close_done = 1;
1382 shutdown(pxtcp->sock, SHUT_WR); /* half-close the socket */
1383
1384#if !(HAVE_TCP_POLLHUP & POLLOUT)
1385 /*
1386 * We need to nudge the poll manager manually, since the OS will not
1387 * report POLLHUP.
1388 */
1389 if (pxtcp->inbound_close) {
1390 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_DEL, pxtcp);
1391 }
1392#endif
1393
1394
1395 /* no more outbound data coming to us */
1396 tcp_recv(pcb, NULL);
1397
1398 /*
1399 * If we have already done inbound close previously (active close
1400 * on the pcb), then we must not hold onto a pcb in TIME_WAIT
1401 * state since those will be recycled by lwip when it runs out of
1402 * free pcbs in the pool.
1403 *
1404 * The test is true also for a pcb in CLOSING state that waits
1405 * just for the ACK of its FIN (to transition to TIME_WAIT).
1406 */
1407 if (pxtcp_pcb_forward_inbound_done(pxtcp)) {
1408 pxtcp_pcb_dissociate(pxtcp);
1409 }
1410}
1411
1412
1413/**
1414 * Forward outbound data from pcb to socket.
1415 *
1416 * Called by pxtcp_pcb_recv() to forward new data and by callout
1417 * triggered by POLLOUT on the socket to send previously unsent data.
1418 *
1419 * (Re)schedules a one-time callout if not all data are sent.
1420 */
1421static err_t
1422pxtcp_pcb_forward_outbound(struct pxtcp *pxtcp, struct pbuf *p)
1423{
1424 struct pbuf *qs, *q;
1425 size_t qoff;
1426 size_t forwarded;
1427 int sockerr;
1428
1429 LWIP_ASSERT1(pxtcp->unsent == NULL || pxtcp->unsent == p);
1430
1431 forwarded = 0;
1432 sockerr = 0;
1433
1434 q = NULL;
1435 qoff = 0;
1436
1437 qs = p;
1438 while (qs != NULL) {
1439 IOVEC iov[8];
1440 const size_t iovsize = sizeof(iov)/sizeof(iov[0]);
1441 size_t fwd1;
1442 ssize_t nsent;
1443 size_t i;
1444
1445 fwd1 = 0;
1446 for (i = 0, q = qs; i < iovsize && q != NULL; ++i, q = q->next) {
1447 LWIP_ASSERT1(q->len > 0);
1448 IOVEC_SET_BASE(iov[i], q->payload);
1449 IOVEC_SET_LEN(iov[i], q->len);
1450 fwd1 += q->len;
1451 }
1452
1453 /*
1454 * TODO: This is where an application-level proxy can hook in
1455 * to process outbound traffic.
1456 */
1457 nsent = pxtcp_sock_send(pxtcp, iov, i);
1458
1459 if (nsent == (ssize_t)fwd1) {
1460 /* successfully sent this chain fragment completely */
1461 forwarded += nsent;
1462 qs = q;
1463 }
1464 else if (nsent >= 0) {
1465 /* successfully sent only some data */
1466 forwarded += nsent;
1467
1468 /* find the first pbuf that was not completely forwarded */
1469 qoff = nsent;
1470 for (i = 0, q = qs; i < iovsize && q != NULL; ++i, q = q->next) {
1471 if (qoff < q->len) {
1472 break;
1473 }
1474 qoff -= q->len;
1475 }
1476 LWIP_ASSERT1(q != NULL);
1477 LWIP_ASSERT1(qoff < q->len);
1478 break;
1479 }
1480 else {
1481 sockerr = -nsent;
1482
1483 /*
1484 * Some errors are really not errors - if we get them,
1485 * it's not different from getting nsent == 0, so filter
1486 * them out here.
1487 */
1488 if (proxy_error_is_transient(sockerr)) {
1489 sockerr = 0;
1490 }
1491 q = qs;
1492 qoff = 0;
1493 break;
1494 }
1495 }
1496
1497 if (forwarded > 0) {
1498 tcp_recved(pxtcp->pcb, (u16_t)forwarded);
1499 }
1500
1501 if (q == NULL) { /* everything is forwarded? */
1502 LWIP_ASSERT1(sockerr == 0);
1503 LWIP_ASSERT1(forwarded == p->tot_len);
1504
1505 pxtcp->unsent = NULL;
1506 pbuf_free(p);
1507 if (pxtcp->outbound_close) {
1508 pxtcp_pcb_forward_outbound_close(pxtcp);
1509 }
1510 }
1511 else {
1512 if (q != p) {
1513 /* free forwarded pbufs at the beginning of the chain */
1514 pbuf_ref(q);
1515 pbuf_free(p);
1516 }
1517 if (qoff > 0) {
1518 /* advance payload pointer past the forwarded part */
1519 pbuf_header(q, -(s16_t)qoff);
1520 }
1521 pxtcp->unsent = q;
1522
1523 /*
1524 * Has sendmsg() failed?
1525 *
1526 * Connection reset will be detected by poll and
1527 * pxtcp_schedule_reset() will be called.
1528 *
1529 * Otherwise something *really* unexpected must have happened,
1530 * so we'd better abort.
1531 */
1532 if (sockerr != 0 && sockerr != ECONNRESET) {
1533 struct tcp_pcb *pcb = pxtcp->pcb;
1534 pxtcp_pcb_dissociate(pxtcp);
1535
1536 tcp_abort(pcb);
1537
1538 /* call error callback manually since we've already dissociated */
1539 pxtcp_pcb_err((void *)pxtcp, ERR_ABRT);
1540 return ERR_ABRT;
1541 }
1542
1543 /* schedule one-shot POLLOUT on the socket */
1544 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_POLLOUT, pxtcp);
1545 }
1546 return ERR_OK;
1547}
1548
1549
1550#if !defined(RT_OS_WINDOWS)
1551static ssize_t
1552pxtcp_sock_send(struct pxtcp *pxtcp, IOVEC *iov, size_t iovlen)
1553{
1554 struct msghdr mh;
1555 ssize_t nsent;
1556
1557#ifdef MSG_NOSIGNAL
1558 const int send_flags = MSG_NOSIGNAL;
1559#else
1560 const int send_flags = 0;
1561#endif
1562
1563 memset(&mh, 0, sizeof(mh));
1564
1565 mh.msg_iov = iov;
1566 mh.msg_iovlen = iovlen;
1567
1568 nsent = sendmsg(pxtcp->sock, &mh, send_flags);
1569 if (nsent < 0) {
1570 nsent = -SOCKERRNO();
1571 }
1572
1573 return nsent;
1574}
1575#else /* RT_OS_WINDOWS */
1576static ssize_t
1577pxtcp_sock_send(struct pxtcp *pxtcp, IOVEC *iov, size_t iovlen)
1578{
1579 DWORD nsent;
1580 int status;
1581
1582 status = WSASend(pxtcp->sock, iov, (DWORD)iovlen, &nsent,
1583 0, NULL, NULL);
1584 if (status == SOCKET_ERROR) {
1585 nsent = -SOCKERRNO();
1586 }
1587
1588 return nsent;
1589}
1590#endif /* RT_OS_WINDOWS */
1591
1592
1593/**
1594 * Callback from poll manager (on POLLOUT) to send data from
1595 * pxtcp::unsent pbuf to socket.
1596 */
1597static void
1598pxtcp_pcb_write_outbound(void *ctx)
1599{
1600 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
1601 LWIP_ASSERT1(pxtcp != NULL);
1602
1603 if (pxtcp->pcb == NULL) {
1604 return;
1605 }
1606
1607 pxtcp_pcb_forward_outbound(pxtcp, pxtcp->unsent);
1608}
1609
1610
1611/**
1612 * Common poll manager callback used by both outgoing and incoming
1613 * (port-forwarded) connections that have a connected socket.
1614 */
1615static int
1616pxtcp_pmgr_pump(struct pollmgr_handler *handler, SOCKET fd, int revents)
1617{
1618 struct pxtcp *pxtcp;
1619 int status;
1620 int sockerr;
1621
1622 pxtcp = (struct pxtcp *)handler->data;
1623 LWIP_ASSERT1(handler == &pxtcp->pmhdl);
1624 LWIP_ASSERT1(fd == pxtcp->sock);
1625
1626 if (revents & POLLNVAL) {
1627 pxtcp->sock = INVALID_SOCKET;
1628 return pxtcp_schedule_reset(pxtcp);
1629 }
1630
1631 if (revents & POLLERR) {
1632 socklen_t optlen = (socklen_t)sizeof(sockerr);
1633
1634 status = getsockopt(pxtcp->sock, SOL_SOCKET, SO_ERROR,
1635 (char *)&sockerr, &optlen);
1636 if (status < 0) { /* should not happen */
1637 DPRINTF(("sock %d: SO_ERROR failed: %R[sockerr]\n",
1638 fd, SOCKERRNO()));
1639 }
1640 else {
1641 DPRINTF0(("sock %d: %R[sockerr]\n", fd, sockerr));
1642 }
1643 return pxtcp_schedule_reset(pxtcp);
1644 }
1645
1646 if (revents & POLLOUT) {
1647 pxtcp->events &= ~POLLOUT;
1648 proxy_lwip_post(&pxtcp->msg_outbound);
1649 }
1650
1651 if (revents & POLLIN) {
1652 ssize_t nread;
1653 int stop_pollin;
1654
1655 nread = pxtcp_sock_read(pxtcp, &stop_pollin);
1656 if (nread < 0) {
1657 sockerr = -(int)nread;
1658 DPRINTF0(("sock %d: %R[sockerr]\n", fd, sockerr));
1659 return pxtcp_schedule_reset(pxtcp);
1660 }
1661
1662 if (stop_pollin) {
1663 pxtcp->events &= ~POLLIN;
1664 }
1665
1666 if (nread > 0) {
1667 proxy_lwip_post(&pxtcp->msg_inbound);
1668#if !HAVE_TCP_POLLHUP
1669 /*
1670 * If host does not report POLLHUP for closed sockets
1671 * (e.g. NetBSD) we should check for full close manually.
1672 */
1673 if (pxtcp->inbound_close && pxtcp->outbound_close_done) {
1674 LWIP_ASSERT1((revents & POLLHUP) == 0);
1675 return pxtcp_schedule_delete(pxtcp);
1676 }
1677#endif
1678 }
1679 }
1680
1681#if !HAVE_TCP_POLLHUP
1682 LWIP_ASSERT1((revents & POLLHUP) == 0);
1683#else
1684 if (revents & POLLHUP) {
1685#if HAVE_TCP_POLLHUP == POLLIN
1686 /*
1687 * Remote closed inbound.
1688 */
1689 if (!pxtcp->outbound_close_done) {
1690 /*
1691 * We might still need to poll for POLLOUT, but we can not
1692 * poll for POLLIN anymore (even if not all data are read)
1693 * because we will be spammed by POLLHUP.
1694 */
1695 pxtcp->events &= ~POLLIN;
1696 if (!pxtcp->inbound_close) {
1697 /* the rest of the input has to be pulled */
1698 proxy_lwip_post(&pxtcp->msg_inpull);
1699 }
1700 }
1701 else
1702#endif
1703 /*
1704 * Both directions are closed.
1705 */
1706 {
1707 DPRINTF(("sock %d: HUP\n", fd));
1708 LWIP_ASSERT1(pxtcp->outbound_close_done);
1709
1710 if (pxtcp->inbound_close) {
1711 /* there's no unread data, we are done */
1712 return pxtcp_schedule_delete(pxtcp);
1713 }
1714 else {
1715 /* pull the rest of the input first (deferred_delete) */
1716 pxtcp->pmhdl.slot = -1;
1717 proxy_lwip_post(&pxtcp->msg_inpull);
1718 return -1;
1719 }
1720 /* NOTREACHED */
1721 }
1722
1723 }
1724#endif /* HAVE_TCP_POLLHUP */
1725
1726 return pxtcp->events;
1727}
1728
1729
1730/**
1731 * Read data from the socket into the ringbuf. This may be used on both
1732 * the lwip and poll manager threads.
1733 *
1734 * The flag pointed to by pstop is set when further reading is impossible,
1735 * either temporarily when the buffer is full, or permanently when EOF is
1736 * received.
1737 *
1738 * Returns the number of bytes read. NB: EOF is reported as 1!
1739 *
1740 * Returns zero if nothing was read, either because the buffer is full, or
1741 * if no data is available (EWOULDBLOCK, EINTR &c).
1742 *
1743 * Returns -errno on real socket errors.
1744 */
1745static ssize_t
1746pxtcp_sock_read(struct pxtcp *pxtcp, int *pstop)
1747{
1748 IOVEC iov[2];
1749 size_t iovlen;
1750 ssize_t nread;
1751
1752 const size_t sz = pxtcp->inbuf.bufsize;
1753 size_t beg, lim, wrnew;
1754
1755 *pstop = 0;
1756
1757 beg = pxtcp->inbuf.vacant;
1758 IOVEC_SET_BASE(iov[0], &pxtcp->inbuf.buf[beg]);
1759
1760 /* lim is the index we can NOT write to */
1761 lim = pxtcp->inbuf.unacked;
1762 if (lim == 0) {
1763 lim = sz - 1; /* empty slot at the end */
1764 }
1765 else if (lim == 1) {
1766 lim = sz; /* empty slot at the beginning */
1767 }
1768 else {
1769 --lim;
1770 }
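
    /*
     * Example (hypothetical indices): with unacked == 5, lim becomes 4, so
     * the producer may fill slots up to index 3 and slot 4 stays empty.
     * Keeping one slot unused lets beg == lim mean "buffer full" while
     * vacant == unacked means "buffer empty".
     */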
1771
1772 if (beg == lim) {
1773 /*
1774 * Buffer is full, stop polling for POLLIN.
1775 *
1776 * pxtcp_pcb_sent() will re-enable POLLIN when guest ACKs
1777 * data, freeing space in the ring buffer.
1778 */
1779 *pstop = 1;
1780 return 0;
1781 }
1782
1783 if (beg < lim) {
1784 /* free space in one chunk */
1785 iovlen = 1;
1786 IOVEC_SET_LEN(iov[0], lim - beg);
1787 }
1788 else {
1789 /* free space in two chunks */
1790 iovlen = 2;
1791 IOVEC_SET_LEN(iov[0], sz - beg);
1792 IOVEC_SET_BASE(iov[1], &pxtcp->inbuf.buf[0]);
1793 IOVEC_SET_LEN(iov[1], lim);
1794 }
1795
1796 /*
1797 * TODO: This is where an application-level proxy can hook in to
1798 * process inbound traffic.
1799 */
1800 nread = pxtcp_sock_recv(pxtcp, iov, iovlen);
1801
1802 if (nread > 0) {
1803 wrnew = beg + nread;
1804 if (wrnew >= sz) {
1805 wrnew -= sz;
1806 }
1807 pxtcp->inbuf.vacant = wrnew;
1808 DPRINTF2(("pxtcp %p: sock %d read %d bytes\n",
1809 (void *)pxtcp, pxtcp->sock, (int)nread));
1810 return nread;
1811 }
1812 else if (nread == 0) {
1813 *pstop = 1;
1814 pxtcp->inbound_close = 1;
1815 DPRINTF2(("pxtcp %p: sock %d read EOF\n",
1816 (void *)pxtcp, pxtcp->sock));
1817 return 1;
1818 }
1819 else {
1820 int sockerr = -nread;
1821
1822 if (proxy_error_is_transient(sockerr)) {
1823 /* haven't read anything, just return */
1824 DPRINTF2(("pxtcp %p: sock %d read cancelled\n",
1825 (void *)pxtcp, pxtcp->sock));
1826 return 0;
1827 }
1828 else {
1829 /* socket error! */
1830 DPRINTF0(("pxtcp %p: sock %d read: %R[sockerr]\n",
1831 (void *)pxtcp, pxtcp->sock, sockerr));
1832 return -sockerr;
1833 }
1834 }
1835}
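
/*
 * NB: reporting EOF as 1 keeps the "nread > 0" path in pxtcp_pmgr_pump()
 * alive, so msg_inbound is still posted and pxtcp_pcb_forward_inbound()
 * runs on the lwIP thread, where it sees inbound_close set and forwards
 * the half-close to the guest.
 */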
1836
1837
1838#if !defined(RT_OS_WINDOWS)
1839static ssize_t
1840pxtcp_sock_recv(struct pxtcp *pxtcp, IOVEC *iov, size_t iovlen)
1841{
1842 struct msghdr mh;
1843 ssize_t nread;
1844
1845 memset(&mh, 0, sizeof(mh));
1846
1847 mh.msg_iov = iov;
1848 mh.msg_iovlen = iovlen;
1849
1850 nread = recvmsg(pxtcp->sock, &mh, 0);
1851 if (nread < 0) {
1852 nread = -SOCKERRNO();
1853 }
1854
1855 return nread;
1856}
1857#else /* RT_OS_WINDOWS */
1858static ssize_t
1859pxtcp_sock_recv(struct pxtcp *pxtcp, IOVEC *iov, size_t iovlen)
1860{
1861 DWORD flags;
1862 DWORD nread;
1863 int status;
1864
1865 flags = 0;
1866 status = WSARecv(pxtcp->sock, iov, (DWORD)iovlen, &nread,
1867 &flags, NULL, NULL);
1868 if (status == SOCKET_ERROR) {
1869 nread = -SOCKERRNO();
1870 }
1871
1872 return (ssize_t)nread;
1873}
1874#endif /* RT_OS_WINDOWS */
1875
1876
1877/**
1878 * Callback from poll manager (pxtcp::msg_inbound) to trigger output
1879 * from ringbuf to guest.
1880 */
1881static void
1882pxtcp_pcb_write_inbound(void *ctx)
1883{
1884 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
1885 LWIP_ASSERT1(pxtcp != NULL);
1886
1887 if (pxtcp->pcb == NULL) {
1888 return;
1889 }
1890
1891 pxtcp_pcb_forward_inbound(pxtcp);
1892}
1893
1894
1895/**
1896 * tcp_poll() callback
1897 *
1898 * We switch it on when tcp_write() or tcp_shutdown() fail with
1899 * ERR_MEM to prevent connection from stalling. If there are ACKs or
1900 * more inbound data then pxtcp_pcb_forward_inbound() will be
1901 * triggered again, but if neither happens, tcp_poll() comes to the
1902 * rescue.
1903 */
1904static err_t
1905pxtcp_pcb_poll(void *arg, struct tcp_pcb *pcb)
1906{
1907 struct pxtcp *pxtcp = (struct pxtcp *)arg;
1908 LWIP_UNUSED_ARG(pcb);
1909
1910 DPRINTF2(("%s: pxtcp %p; pcb %p\n",
1911 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
1912
1913 pxtcp_pcb_forward_inbound(pxtcp);
1914
1915 /*
1916 * If the last thing holding up deletion of the pxtcp was failed
1917 * tcp_shutdown() and it succeeded, we may be the last callback.
1918 */
1919 pxtcp_pcb_maybe_deferred_delete(pxtcp);
1920
1921 return ERR_OK;
1922}
1923
1924
1925static void
1926pxtcp_pcb_schedule_poll(struct pxtcp *pxtcp)
1927{
1928 tcp_poll(pxtcp->pcb, pxtcp_pcb_poll, 0);
1929}
1930
1931
1932static void
1933pxtcp_pcb_cancel_poll(struct pxtcp *pxtcp)
1934{
1935 tcp_poll(pxtcp->pcb, NULL, 255);
1936}
1937
1938
1939/**
1940 * Forward inbound data from ring buffer to the guest.
1941 *
1942 * Scheduled by poll manager thread after it receives more data into
1943 * the ring buffer (we have more data to send).
1944 *
1945 * Also called from tcp_sent() callback when guest ACKs some data,
1946 * increasing pcb->snd_buf (we are permitted to send more data).
1947 *
1948 * Also called from tcp_poll() callback if previous attempt to forward
1949 * inbound data failed with ERR_MEM (we need to try again).
1950 */
1951static void
1952pxtcp_pcb_forward_inbound(struct pxtcp *pxtcp)
1953{
1954 struct tcp_pcb *pcb;
1955 size_t sndbuf;
1956 size_t beg, lim, sndlim;
1957 size_t toeob, tolim;
1958 size_t nsent;
1959 err_t error;
1960
1961 LWIP_ASSERT1(pxtcp != NULL);
1962 pcb = pxtcp->pcb;
1963 if (pcb == NULL) {
1964 return;
1965 }
1966
1967 if (/* __predict_false */ pcb->state < ESTABLISHED) {
1968 /*
1969 * If we have just confirmed accept of this connection, the
1970 * pcb is in SYN_RCVD state and we still haven't received the
1971 * ACK of our SYN. It's only in SYN_RCVD -> ESTABLISHED
1972 * transition that lwip decrements pcb->acked so that that ACK
1973 * is not reported to pxtcp_pcb_sent(). If we send something
1974 * now and immediately close (think "daytime", e.g.) while
1975 * still in SYN_RCVD state, we will move directly to
1976 * FIN_WAIT_1 and when our confirming SYN is ACK'ed lwip will
1977 * report it to pxtcp_pcb_sent().
1978 */
1979 DPRINTF2(("forward_inbound: pxtcp %p; pcb %p %s - later...\n",
1980 (void *)pxtcp, (void *)pcb, tcp_debug_state_str(pcb->state)));
1981 return;
1982 }
1983
1984
1985 beg = pxtcp->inbuf.unsent; /* private to lwip thread */
1986 lim = pxtcp->inbuf.vacant;
1987
1988 if (beg == lim) {
1989 if (pxtcp->inbound_close && !pxtcp->inbound_close_done) {
1990 pxtcp_pcb_forward_inbound_close(pxtcp);
1991 tcp_output(pcb);
1992 return;
1993 }
1994
1995 /*
1996 * Else, there's no data to send.
1997 *
1998 * If there is free space in the buffer, producer will
1999 * reschedule us as it receives more data and vacant (lim)
2000 * advances.
2001 *
2002 * If buffer is full when all data have been passed to
2003 * tcp_write() but not yet acknowledged, we will advance
2004 * unacked on ACK, freeing some space for producer to write to
2005 * (then see above).
2006 */
2007 return;
2008 }
2009
2010 sndbuf = tcp_sndbuf(pcb);
2011 if (sndbuf == 0) {
2012 /*
2013 * Can't send anything now. As guest ACKs some data, TCP will
2014 * call pxtcp_pcb_sent() callback and we will come here again.
2015 */
2016 return;
2017 }
2018
2019 nsent = 0;
2020
2021 /*
2022 * We have three limits to consider:
2023 * - how much data we have in the ringbuf
2024 * - how much data we are allowed to send
2025 * - ringbuf size
2026 */
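    /*
     * Worked example (hypothetical sizes): bufsize = 16, beg = 14, lim = 3
     * (wrapped), sndbuf = 5.  Then toeob = 2 and, since sndbuf >= toeob,
     * the first tcp_write() below sends buf[14..15] with TCP_WRITE_FLAG_MORE,
     * "unsent" wraps to 0, and the second tcp_write() sends buf[0..2]
     * (tolim = 3), for 5 bytes in total.
     */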
2027 toeob = pxtcp->inbuf.bufsize - beg;
2028 if (lim < beg) { /* lim wrapped */
2029 if (sndbuf < toeob) { /* but we are limited by sndbuf */
2030 /* so beg is not going to wrap, treat sndbuf as lim */
2031 lim = beg + sndbuf; /* ... and proceed to the simple case */
2032 }
2033 else { /* we are limited by the end of the buffer, beg will wrap */
2034 u8_t maybemore;
2035 if (toeob == sndbuf || lim == 0) {
2036 maybemore = 0;
2037 }
2038 else {
2039 maybemore = TCP_WRITE_FLAG_MORE;
2040 }
2041
2042 error = tcp_write(pcb, &pxtcp->inbuf.buf[beg], toeob, maybemore);
2043 if (error != ERR_OK) {
2044 goto writeerr;
2045 }
2046 nsent += toeob;
2047 pxtcp->inbuf.unsent = 0; /* wrap */
2048
2049 if (maybemore) {
2050 beg = 0;
2051 sndbuf -= toeob;
2052 }
2053 else {
2054 /* we are done sending, but ... */
2055 goto check_inbound_close;
2056 }
2057 }
2058 }
2059
2060 LWIP_ASSERT1(beg < lim);
2061 sndlim = beg + sndbuf;
2062 if (lim > sndlim) {
2063 lim = sndlim;
2064 }
2065 tolim = lim - beg;
2066 if (tolim > 0) {
2067 error = tcp_write(pcb, &pxtcp->inbuf.buf[beg], (u16_t)tolim, 0);
2068 if (error != ERR_OK) {
2069 goto writeerr;
2070 }
2071 nsent += tolim;
2072 pxtcp->inbuf.unsent = lim;
2073 }
2074
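 /*
  * If the external peer has already closed and everything buffered has
  * now been passed to tcp_write(), forward the close (FIN) to the guest.
  */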
2075 check_inbound_close:
2076 if (pxtcp->inbound_close && pxtcp->inbuf.unsent == pxtcp->inbuf.vacant) {
2077 pxtcp_pcb_forward_inbound_close(pxtcp);
2078 }
2079
2080 DPRINTF2(("forward_inbound: pxtcp %p, pcb %p: sent %d bytes\n",
2081 (void *)pxtcp, (void *)pcb, (int)nsent));
2082 tcp_output(pcb);
2083 pxtcp_pcb_cancel_poll(pxtcp);
2084 return;
2085
2086 writeerr:
2087 if (error == ERR_MEM) {
2088 if (nsent > 0) { /* first write succeeded, second failed */
2089 DPRINTF2(("forward_inbound: pxtcp %p, pcb %p: sent %d bytes only\n",
2090 (void *)pxtcp, (void *)pcb, (int)nsent));
2091 tcp_output(pcb);
2092 }
2093 DPRINTF(("forward_inbound: pxtcp %p, pcb %p: ERR_MEM\n",
2094 (void *)pxtcp, (void *)pcb));
2095 pxtcp_pcb_schedule_poll(pxtcp);
2096 }
2097 else {
2098 DPRINTF(("forward_inbound: pxtcp %p, pcb %p: %s\n",
2099 (void *)pxtcp, (void *)pcb, proxy_lwip_strerr(error)));
2100
2101 /* XXX: We shouldn't get ERR_ARG. Check ERR_CONN conditions early? */
2102 LWIP_ASSERT1(error == ERR_MEM);
2103 }
2104}
2105
2106
2107static void
2108pxtcp_pcb_forward_inbound_close(struct pxtcp *pxtcp)
2109{
2110 struct tcp_pcb *pcb;
2111 err_t error;
2112
2113 LWIP_ASSERT1(pxtcp != NULL);
2114 LWIP_ASSERT1(pxtcp->inbound_close);
2115 LWIP_ASSERT1(!pxtcp->inbound_close_done);
2116 LWIP_ASSERT1(pxtcp->inbuf.unsent == pxtcp->inbuf.vacant);
2117
2118 pcb = pxtcp->pcb;
2119 LWIP_ASSERT1(pcb != NULL);
2120
2121 DPRINTF(("inbound_close: pxtcp %p; pcb %p: %s\n",
2122 (void *)pxtcp, (void *)pcb, tcp_debug_state_str(pcb->state)));
2123
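 /* Half-close towards the guest: shut down our TX direction, which queues a FIN. */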
2124 error = tcp_shutdown(pcb, /*RX*/ 0, /*TX*/ 1);
2125 if (error != ERR_OK) {
2126 DPRINTF(("inbound_close: pxtcp %p; pcb %p:"
2127 " tcp_shutdown: error=%s\n",
2128 (void *)pxtcp, (void *)pcb, proxy_lwip_strerr(error)));
2129 pxtcp_pcb_schedule_poll(pxtcp);
2130 return;
2131 }
2132
2133 pxtcp_pcb_cancel_poll(pxtcp);
2134 pxtcp->inbound_close_done = 1;
2135
2136
2137 /*
2138 * If we have already done outbound close previously (passive
2139 * close on the pcb), then we must not hold onto a pcb in LAST_ACK
2140 * state since those will be deleted by lwip when that last ack
2141 * comes from the guest.
2142 *
2143 * NB: We do NOT check for deferred delete here, even though we
2144 * have just set one of its conditions, inbound_close_done. We
2145 * let pcb callbacks that called us do that. It's simpler and
2146 * cleaner that way.
2147 */
2148 if (pxtcp->outbound_close_done && pxtcp_pcb_forward_inbound_done(pxtcp)) {
2149 pxtcp_pcb_dissociate(pxtcp);
2150 }
2151}
2152
2153
2154/**
2155 * Check that all forwarded inbound data is sent and acked, and that
2156 * inbound close is scheduled (we aren't called back when it's acked).
2157 */
2158DECLINLINE(int)
2159pxtcp_pcb_forward_inbound_done(const struct pxtcp *pxtcp)
2160{
2161 return (pxtcp->inbound_close_done /* also implies that all data forwarded */
2162 && pxtcp->inbuf.unacked == pxtcp->inbuf.unsent);
2163}
2164
2165
2166/**
2167 * tcp_sent() callback - guest acknowledged len bytes.
2168 *
2169 * We can advance inbuf::unacked index, making more free space in the
2170 * ringbuf and wake up producer on poll manager thread.
2171 *
2172 * We can also try to send more data if we have any since pcb->snd_buf
2173 * was increased and we are now permitted to send more.
2174 */
2175static err_t
2176pxtcp_pcb_sent(void *arg, struct tcp_pcb *pcb, u16_t len)
2177{
2178 struct pxtcp *pxtcp = (struct pxtcp *)arg;
2179 size_t unacked;
2180
2181 LWIP_ASSERT1(pxtcp != NULL);
2182 LWIP_ASSERT1(pxtcp->pcb == pcb);
2183 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
2184 LWIP_UNUSED_ARG(pcb); /* only in assert */
2185
2186 DPRINTF2(("%s: pxtcp %p; pcb %p: +%d ACKed:"
2187 " unacked %d, unsent %d, vacant %d\n",
2188 __func__, (void *)pxtcp, (void *)pcb, (int)len,
2189 (int)pxtcp->inbuf.unacked,
2190 (int)pxtcp->inbuf.unsent,
2191 (int)pxtcp->inbuf.vacant));
2192
2193 if (/* __predict_false */ len == 0) {
2194 /* we are notified to start pulling */
2195 LWIP_ASSERT1(!pxtcp->inbound_close);
2196 LWIP_ASSERT1(pxtcp->inbound_pull);
2197
2198 unacked = pxtcp->inbuf.unacked;
2199 }
2200 else {
2201 /*
2202 * Advance unacked index. Guest acknowledged the data, so it
2203 * won't be needed again for potential retransmits.
2204 */
2205 unacked = pxtcp->inbuf.unacked + len;
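 /* The ring buffer index wraps around the end of the buffer. */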
2206 if (unacked > pxtcp->inbuf.bufsize) {
2207 unacked -= pxtcp->inbuf.bufsize;
2208 }
2209 pxtcp->inbuf.unacked = unacked;
2210 }
2211
2212 /* arrange for more inbound data */
2213 if (!pxtcp->inbound_close) {
2214 if (!pxtcp->inbound_pull) {
2215 /* wake up producer, in case it has stopped polling for POLLIN */
2216 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_POLLIN, pxtcp);
2217#ifdef RT_OS_WINDOWS
2218 /**
2219 * We haven't got enough room in the ring buffer to read at the
2220 * moment, but we don't want to lose the notification from WSAW4ME
2221 * when space becomes available, so we reset the event with an empty recv.
2222 */
2223 recv(pxtcp->sock, NULL, 0, 0);
2224#endif
2225 }
2226 else {
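 /*
  * Pull mode: the socket is no longer registered with the poll
  * manager, so read from it directly now that this ACK has freed
  * some ring buffer space.
  */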
2227 ssize_t nread;
2228 int stop_pollin; /* ignored */
2229
2230 nread = pxtcp_sock_read(pxtcp, &stop_pollin);
2231
2232 if (nread < 0) {
2233 int sockerr = -(int)nread;
2234 LWIP_UNUSED_ARG(sockerr);
2235 DPRINTF0(("%s: sock %d: %R[sockerr]\n",
2236 __func__, pxtcp->sock, sockerr));
2237
2238 /*
2239 * Since we are pulling, pxtcp is no longer registered
2240 * with poll manager so we can kill it directly.
2241 */
2242 pxtcp_pcb_reset_pxtcp(pxtcp);
2243 return ERR_ABRT;
2244 }
2245 }
2246 }
2247
2248 /* forward more data if we can */
2249 if (!pxtcp->inbound_close_done) {
2250 pxtcp_pcb_forward_inbound(pxtcp);
2251
2252 /*
2253 * NB: we might have dissociated from a pcb that transitioned
2254 * to LAST_ACK state, so don't refer to pcb below.
2255 */
2256 }
2257
2258
2259 /* have we got all the acks? */
2260 if (pxtcp->inbound_close /* no more new data */
2261 && pxtcp->inbuf.unsent == pxtcp->inbuf.vacant /* all data is sent */
2262 && unacked == pxtcp->inbuf.unsent) /* ... and is acked */
2263 {
2264 char *buf;
2265
2266 DPRINTF(("%s: pxtcp %p; pcb %p; all data ACKed\n",
2267 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
2268
2269 /* no more retransmits, so buf is not needed */
2270 buf = pxtcp->inbuf.buf;
2271 pxtcp->inbuf.buf = NULL;
2272 free(buf);
2273
2274 /* no more acks, so no more callbacks */
2275 if (pxtcp->pcb != NULL) {
2276 tcp_sent(pxtcp->pcb, NULL);
2277 }
2278
2279 /*
2280 * We may be the last callback for this pcb if we have also
2281 * successfully forwarded inbound_close.
2282 */
2283 pxtcp_pcb_maybe_deferred_delete(pxtcp);
2284 }
2285
2286 return ERR_OK;
2287}
2288
2289
2290/**
2291 * Callback from poll manager (pxtcp::msg_inpull) to switch
2292 * pxtcp_pcb_sent() to actively pull the last bits of input. See
2293 * POLLHUP comment in pxtcp_pmgr_pump().
2294 *
2295 * pxtcp::sock is deregistered from poll manager after this callback
2296 * is scheduled.
2297 */
2298static void
2299pxtcp_pcb_pull_inbound(void *ctx)
2300{
2301 struct pxtcp *pxtcp = (struct pxtcp *)ctx;
2302 LWIP_ASSERT1(pxtcp != NULL);
2303
2304 if (pxtcp->pcb == NULL) {
2305 DPRINTF(("%s: pxtcp %p: PCB IS GONE\n", __func__, (void *)pxtcp));
2306 pxtcp_pcb_reset_pxtcp(pxtcp);
2307 return;
2308 }
2309
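 /*
  * The socket is being deregistered from the poll manager; from now on
  * pxtcp_pcb_sent() will read from it directly as the guest ACKs data.
  */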
2310 pxtcp->inbound_pull = 1;
2311 if (pxtcp->outbound_close_done) {
2312 DPRINTF(("%s: pxtcp %p: pcb %p (deferred delete)\n",
2313 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
2314 pxtcp->deferred_delete = 1;
2315 }
2316 else {
2317 DPRINTF(("%s: pxtcp %p: pcb %p\n",
2318 __func__, (void *)pxtcp, (void *)pxtcp->pcb));
2319 }
2320
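 /* Kick the sent callback with len == 0 to start pulling (see pxtcp_pcb_sent). */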
2321 pxtcp_pcb_sent(pxtcp, pxtcp->pcb, 0);
2322}
2323
2324
2325/**
2326 * tcp_err() callback.
2327 *
2328 * pcb is not passed to this callback since it may be already
2329 * deallocated by the stack, but we can't do anything useful with it
2330 * anyway since connection is gone.
2331 */
2332static void
2333pxtcp_pcb_err(void *arg, err_t error)
2334{
2335 struct pxtcp *pxtcp = (struct pxtcp *)arg;
2336 LWIP_ASSERT1(pxtcp != NULL);
2337
2338 /*
2339 * ERR_CLSD is special - it is reported here when:
2340 *
2341 * . guest has already half-closed
2342 * . we send FIN to guest when external half-closes
2343 * . guest acks that FIN
2344 *
2345 * Since the connection is closed, but receive has already been
2346 * closed earlier, lwip can only report this via tcp_err. At this point the pcb
2347 * is still alive, so we can peek at it if need be.
2348 *
2349 * The interesting twist is when the ACK from the guest that acks our
2350 * FIN also acks some data. In this scenario lwip will NOT call
2351 * tcp_sent() callback with the ACK for that last bit of data but
2352 * instead will call tcp_err with ERR_CLSD right away. Since that
2353 * ACK also acknowledges all the data, we should run some of
2354 * pxtcp_pcb_sent() logic here.
2355 */
2356 if (error == ERR_CLSD) {
2357 struct tcp_pcb *pcb = pxtcp->pcb; /* still alive */
2358
2359 DPRINTF2(("ERR_CLSD: pxtcp %p; pcb %p:"
2360 " pcb->acked %d;"
2361 " unacked %d, unsent %d, vacant %d\n",
2362 (void *)pxtcp, (void *)pcb,
2363 pcb->acked,
2364 (int)pxtcp->inbuf.unacked,
2365 (int)pxtcp->inbuf.unsent,
2366 (int)pxtcp->inbuf.vacant));
2367
2368 LWIP_ASSERT1(pxtcp->pcb == pcb);
2369 LWIP_ASSERT1(pcb->callback_arg == pxtcp);
2370
2371 if (pcb->acked > 0) {
2372 pxtcp_pcb_sent(pxtcp, pcb, pcb->acked);
2373 }
2374 return;
2375 }
2376
2377 DPRINTF0(("tcp_err: pxtcp=%p, error=%s\n",
2378 (void *)pxtcp, proxy_lwip_strerr(error)));
2379
2380 pxtcp->pcb = NULL; /* pcb is gone */
2381 if (pxtcp->deferred_delete) {
2382 pxtcp_pcb_reset_pxtcp(pxtcp);
2383 }
2384 else {
2385 pxtcp_chan_send_weak(POLLMGR_CHAN_PXTCP_RESET, pxtcp);
2386 }
2387}