VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/socket.c@ 64137

Last change on this file since 64137 was 63676, checked in by vboxsync, 8 years ago

NAT: soread - errno is valid only when syscall indicates failure, so
set saved sockerr to 0 on success (because POLA).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 41.6 KB
Line 
1/* $Id: socket.c 63676 2016-08-31 16:17:19Z vboxsync $ */
2/** @file
3 * NAT - socket handling.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * Copyright (c) 1995 Danny Gasparovski.
22 *
23 * Please read the file COPYRIGHT for the
24 * terms and conditions of the copyright.
25 */
26
27#include <slirp.h>
28#include "ip_icmp.h"
29#include "main.h"
30#ifdef __sun__
31#include <sys/filio.h>
32#endif
33#include <VBox/vmm/pdmdrv.h>
34#if defined (RT_OS_WINDOWS)
35#include <iprt/win/iphlpapi.h>
36#include <icmpapi.h>
37#endif
38
39#if defined(DECLARE_IOVEC) && defined(RT_OS_WINDOWS)
40AssertCompileMembersSameSizeAndOffset(struct iovec, iov_base, WSABUF, buf);
41AssertCompileMembersSameSizeAndOffset(struct iovec, iov_len, WSABUF, len);
42#endif
43
44#ifdef VBOX_WITH_NAT_UDP_SOCKET_CLONE
/**
 * Clone an existing UDP socket for a different foreign address.
 *
 * @param   pData           The NAT state.
 * @param   fBindSocket     When true, attach a fresh host socket (own fd) via
 *                          udp_attach(); when false, share pSo's fd and link the
 *                          clone into the UDP queue (so_cloneOf back-references pSo).
 * @param   pSo             The socket to clone the local/foreign endpoint data from.
 * @param   u32ForeignAddr  The foreign IPv4 address (network order) for the clone.
 * @returns The new socket, or NULL on allocation/attach failure.
 */
struct socket * soCloneUDPSocketWithForegnAddr(PNATState pData, bool fBindSocket, struct socket *pSo, uint32_t u32ForeignAddr)
{
    struct socket *pNewSocket = NULL;
    LogFlowFunc(("Enter: fBindSocket:%RTbool, so:%R[natsock], u32ForeignAddr:%RTnaipv4\n", fBindSocket, pSo, u32ForeignAddr));
    pNewSocket = socreate();
    if (!pNewSocket)
    {
        LogFunc(("Can't create socket\n"));
        LogFlowFunc(("Leave: NULL\n"));
        return NULL;
    }
    if (fBindSocket)
    {
        /* NOTE(review): udp_attach() failure is signalled by a value <= 0 here —
         * confirm against udp_attach()'s contract (usually returns the new fd). */
        if (udp_attach(pData, pNewSocket, 0) <= 0)
        {
            sofree(pData, pNewSocket);
            LogFunc(("Can't attach fresh created socket\n"));
            return NULL;
        }
    }
    else
    {
        /* Share the host fd with the original socket and remember the parent. */
        pNewSocket->so_cloneOf = (struct socket *)pSo;
        pNewSocket->s = pSo->s;
        insque(pData, pNewSocket, &udb);
    }
    /* Copy the endpoint identity, substituting only the foreign address. */
    pNewSocket->so_laddr = pSo->so_laddr;
    pNewSocket->so_lport = pSo->so_lport;
    pNewSocket->so_faddr.s_addr = u32ForeignAddr;
    pNewSocket->so_fport = pSo->so_fport;
    pSo->so_cCloneCounter++;
    LogFlowFunc(("Leave: %R[natsock]\n", pNewSocket));
    return pNewSocket;
}
82
83struct socket *soLookUpClonedUDPSocket(PNATState pData, const struct socket *pcSo, uint32_t u32ForeignAddress)
84{
85 struct socket *pSoClone = NULL;
86 LogFlowFunc(("Enter: pcSo:%R[natsock], u32ForeignAddress:%RTnaipv4\n", pcSo, u32ForeignAddress));
87 for (pSoClone = udb.so_next; pSoClone != &udb; pSoClone = pSoClone->so_next)
88 {
89 if ( pSoClone->so_cloneOf
90 && pSoClone->so_cloneOf == pcSo
91 && pSoClone->so_lport == pcSo->so_lport
92 && pSoClone->so_fport == pcSo->so_fport
93 && pSoClone->so_laddr.s_addr == pcSo->so_laddr.s_addr
94 && pSoClone->so_faddr.s_addr == u32ForeignAddress)
95 goto done;
96 }
97 pSoClone = NULL;
98done:
99 LogFlowFunc(("Leave: pSoClone: %R[natsock]\n", pSoClone));
100 return pSoClone;
101}
102#endif
103
104#ifdef VBOX_WITH_NAT_SEND2HOME
105DECLINLINE(bool) slirpSend2Home(PNATState pData, struct socket *pSo, const void *pvBuf, uint32_t cbBuf, int iFlags)
106{
107 int idxAddr;
108 int ret = 0;
109 bool fSendDone = false;
110 LogFlowFunc(("Enter pSo:%R[natsock] pvBuf: %p, cbBuf: %d, iFlags: %d\n", pSo, pvBuf, cbBuf, iFlags));
111 for (idxAddr = 0; idxAddr < pData->cInHomeAddressSize; ++idxAddr)
112 {
113
114 struct socket *pNewSocket = soCloneUDPSocketWithForegnAddr(pData, pSo, pData->pInSockAddrHomeAddress[idxAddr].sin_addr);
115 AssertReturn((pNewSocket, false));
116 pData->pInSockAddrHomeAddress[idxAddr].sin_port = pSo->so_fport;
117 /** @todo more verbose on errors,
118 * @note: we shouldn't care if this send fail or not (we're in broadcast).
119 */
120 LogFunc(("send %d bytes to %RTnaipv4 from %R[natsock]\n", cbBuf, pData->pInSockAddrHomeAddress[idxAddr].sin_addr.s_addr, pNewSocket));
121 ret = sendto(pNewSocket->s, pvBuf, cbBuf, iFlags, (struct sockaddr *)&pData->pInSockAddrHomeAddress[idxAddr], sizeof(struct sockaddr_in));
122 if (ret < 0)
123 LogFunc(("Failed to send %d bytes to %RTnaipv4\n", cbBuf, pData->pInSockAddrHomeAddress[idxAddr].sin_addr.s_addr));
124 fSendDone |= ret > 0;
125 }
126 LogFlowFunc(("Leave %RTbool\n", fSendDone));
127 return fSendDone;
128}
129#endif /* !VBOX_WITH_NAT_SEND2HOME */
130
131#if !defined(RT_OS_WINDOWS)
132static void send_icmp_to_guest(PNATState, char *, size_t, const struct sockaddr_in *);
133static void sorecvfrom_icmp_unix(PNATState, struct socket *);
134#endif /* !RT_OS_WINDOWS */
135
/*
 * One-time module initialisation hook; currently there is nothing to set up.
 */
void
so_init(void)
{
}
140
141struct socket *
142solookup(struct socket *head, struct in_addr laddr,
143 u_int lport, struct in_addr faddr, u_int fport)
144{
145 struct socket *so;
146
147 for (so = head->so_next; so != head; so = so->so_next)
148 {
149 if ( so->so_lport == lport
150 && so->so_laddr.s_addr == laddr.s_addr
151 && so->so_faddr.s_addr == faddr.s_addr
152 && so->so_fport == fport)
153 return so;
154 }
155
156 return (struct socket *)NULL;
157}
158
159/*
160 * Create a new socket, initialise the fields
161 * It is the responsibility of the caller to
162 * insque() it into the correct linked-list
163 */
164struct socket *
165socreate(void)
166{
167 struct socket *so;
168
169 so = (struct socket *)RTMemAllocZ(sizeof(struct socket));
170 if (so)
171 {
172 so->so_state = SS_NOFDREF;
173 so->s = -1;
174#if !defined(RT_OS_WINDOWS)
175 so->so_poll_index = -1;
176#endif
177 }
178 return so;
179}
180
181/*
182 * remque and free a socket, clobber cache
183 */
/*
 * Unlink @a so from its queue (when queued) and free it, clearing the
 * last-used caches so they do not dangle.  When the polling routine is
 * currently walking the socket lists, deletion is deferred instead.
 */
void
sofree(PNATState pData, struct socket *so)
{
    LogFlowFunc(("ENTER:%R[natsock]\n", so));
    /*
     * We must not remove the socket while the polling routine is iterating
     * over the list; mark it for deferred deletion instead.
     */
    if (so->fUnderPolling)
    {
        so->fShouldBeRemoved = 1;
        LogFlowFunc(("LEAVE:%R[natsock] postponed deletion\n", so));
        return;
    }
    /*
     * Sanity: we must not free a socket that still owns a tcpcb.
     */
    Assert(!sototcpcb(so));
    /* udp checks: no pending expiration callback may still reference us */
    Assert(!so->so_timeout);
    Assert(!so->so_timeout_arg);
    /* Invalidate the last-match caches used by the lookup fast path. */
    if (so == tcp_last_so)
        tcp_last_so = &tcb;
    else if (so == udp_last_so)
        udp_last_so = &udb;

    /* check if mbuf hasn't been already freed */
    if (so->so_m != NULL)
    {
        m_freem(pData, so->so_m);
        so->so_m = NULL;
    }

    if (so->so_ohdr != NULL)
    {
        RTMemFree(so->so_ohdr);
        so->so_ohdr = NULL;
    }

    /* Only unlink when actually queued (both link pointers set). */
    if (so->so_next && so->so_prev)
    {
        remque(pData, so); /* crashes if so is not in a queue */
        NSOCK_DEC();
    }

    RTMemFree(so);
    LogFlowFuncLeave();
}
232
233/*
234 * Read from so's socket into sb_snd, updating all relevant sbuf fields
235 * NOTE: This will only be called if it is select()ed for reading, so
236 * a read() of 0 (or less) means it's disconnected
237 */
/*
 * Read from so's host socket into sb_snd (data headed for the guest),
 * updating all relevant sbuf fields.
 * NOTE: This will only be called if it is select()ed for reading, so
 * a read() of 0 (or less) means it's disconnected.
 *
 * Returns the number of bytes read, 0 for ignorable errors / pending data,
 * or -1 after tearing the connection down.
 */
int
soread(PNATState pData, struct socket *so)
{
    int n, nn, lss, total;
    struct sbuf *sb = &so->so_snd;
    u_int len = sb->sb_datalen - sb->sb_cc;  /* free space in the ring buffer */
    struct iovec iov[2];
    int mss = so->so_tcpcb->t_maxseg;
    int sockerr;

    STAM_PROFILE_START(&pData->StatIOread, a);
    STAM_COUNTER_RESET(&pData->StatIORead_in_1);
    STAM_COUNTER_RESET(&pData->StatIORead_in_2);

    QSOCKET_LOCK(tcb);
    SOCKET_LOCK(so);
    QSOCKET_UNLOCK(tcb);

    LogFlow(("soread: so = %R[natsock]\n", so));
    Log2(("%s: so = %R[natsock] so->so_snd = %R[sbuf]\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, so, sb));

    /*
     * No need to check if there's enough room to read.
     * soread wouldn't have been called if there weren't
     */

    len = sb->sb_datalen - sb->sb_cc;

    /*
     * Build up to two iovecs covering the free region of the circular
     * buffer; the second is only used when the free space wraps around.
     */
    iov[0].iov_base = sb->sb_wptr;
    iov[1].iov_base = 0;
    iov[1].iov_len = 0;
    if (sb->sb_wptr < sb->sb_rptr)
    {
        /* Free space is a single contiguous region up to the read pointer. */
        iov[0].iov_len = sb->sb_rptr - sb->sb_wptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        /* Trim to a whole number of segments so full-MSS frames are produced. */
        if (iov[0].iov_len > mss)
            iov[0].iov_len -= iov[0].iov_len%mss;
        n = 1;
    }
    else
    {
        /* Free space wraps: first part up to the end of the buffer... */
        iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_wptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        len -= iov[0].iov_len;
        if (len)
        {
            /* ...second part from the start of the buffer to the read pointer. */
            iov[1].iov_base = sb->sb_data;
            iov[1].iov_len = sb->sb_rptr - sb->sb_data;
            if (iov[1].iov_len > len)
                iov[1].iov_len = len;
            total = iov[0].iov_len + iov[1].iov_len;
            if (total > mss)
            {
                /* Shave the non-MSS-aligned remainder off the tail. */
                lss = total % mss;
                if (iov[1].iov_len > lss)
                {
                    iov[1].iov_len -= lss;
                    n = 2;
                }
                else
                {
                    /* Remainder exceeds the second part; drop it and trim the first. */
                    lss -= iov[1].iov_len;
                    iov[0].iov_len -= lss;
                    n = 1;
                }
            }
            else
                n = 2;
        }
        else
        {
            if (iov[0].iov_len > mss)
                iov[0].iov_len -= iov[0].iov_len%mss;
            n = 1;
        }
    }

#ifdef HAVE_READV
    nn = readv(so->s, (struct iovec *)iov, n);
#else
    nn = recv(so->s, iov[0].iov_base, iov[0].iov_len, (so->so_tcpcb->t_force? MSG_OOB:0));
#endif
    /* errno is only meaningful when the call failed (POLA). */
    if (nn < 0)
        sockerr = errno; /* save it, as it may be clobbered by logging */
    else
        sockerr = 0;

    Log2(("%s: read(1) nn = %d bytes\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, nn));
    Log2(("%s: so = %R[natsock] so->so_snd = %R[sbuf]\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, so, sb));
    if (nn <= 0)
    {
        if (nn == 0) /* XXX: should this be inside #if defined(RT_OS_WINDOWS)? */
        {
            /*
             * Special case for WSAEnumNetworkEvents: If we receive 0 bytes that
             * _could_ mean that the connection is closed. But we will receive an
             * FD_CLOSE event later if the connection was _really_ closed. With
             * www.youtube.com I see this very often. Closing the socket too early
             * would be dangerous.
             */
            int status;
            unsigned long pending = 0;
            status = ioctlsocket(so->s, FIONREAD, &pending);
            if (status < 0)
                Log(("NAT:%s: error in WSAIoctl: %d\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, errno));
            if (pending != 0)
            {
                /* More data is still queued on the host socket; don't close yet. */
                SOCKET_UNLOCK(so);
                STAM_PROFILE_STOP(&pData->StatIOread, a);
                return 0;
            }
        }

        if (   nn < 0
            && soIgnorableErrorCode(sockerr))
        {
            /* Transient error (e.g. would-block); try again on the next poll. */
            SOCKET_UNLOCK(so);
            STAM_PROFILE_STOP(&pData->StatIOread, a);
            return 0;
        }
        else
        {
            /* A template with INADDR_ANY endpoints indicates the tcpcb was never
             * fully set up, so only a drop (no FIN exchange) makes sense. */
            int fUninitializedTemplate = 0;
            fUninitializedTemplate = RT_BOOL((   sototcpcb(so)
                                              && (  sototcpcb(so)->t_template.ti_src.s_addr == INADDR_ANY
                                                 || sototcpcb(so)->t_template.ti_dst.s_addr == INADDR_ANY)));
            /* nn == 0 means peer has performed an orderly shutdown */
            Log2(("%s: disconnected, nn = %d, errno = %d (%s)\n",
                  RT_GCC_EXTENSION __PRETTY_FUNCTION__, nn, sockerr, strerror(sockerr)));
            sofcantrcvmore(so);
            if (!fUninitializedTemplate)
                tcp_sockclosed(pData, sototcpcb(so));
            else
                tcp_drop(pData, sototcpcb(so), sockerr);
            SOCKET_UNLOCK(so);
            STAM_PROFILE_STOP(&pData->StatIOread, a);
            return -1;
        }
    }
    STAM_STATS(
        if (n == 1)
        {
            STAM_COUNTER_INC(&pData->StatIORead_in_1);
            STAM_COUNTER_ADD(&pData->StatIORead_in_1_bytes, nn);
        }
        else
        {
            STAM_COUNTER_INC(&pData->StatIORead_in_2);
            STAM_COUNTER_ADD(&pData->StatIORead_in_2_1st_bytes, nn);
        }
    );

#ifndef HAVE_READV
    /*
     * If there was no error, try and read the second time round
     * We read again if n = 2 (ie, there's another part of the buffer)
     * and we read as much as we could in the first read
     * We don't test for <= 0 this time, because there legitimately
     * might not be any more data (since the socket is non-blocking),
     * a close will be detected on next iteration.
     * A return of -1 wont (shouldn't) happen, since it didn't happen above
     */
    if (n == 2 && (unsigned)nn == iov[0].iov_len)
    {
        int ret;
        ret = recv(so->s, iov[1].iov_base, iov[1].iov_len, 0);
        if (ret > 0)
            nn += ret;
        STAM_STATS(
            if (ret > 0)
            {
                STAM_COUNTER_INC(&pData->StatIORead_in_2);
                STAM_COUNTER_ADD(&pData->StatIORead_in_2_2nd_bytes, ret);
            }
        );
    }

    Log2(("%s: read(2) nn = %d bytes\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, nn));
#endif

    /* Update fields: account for what was read and advance the write pointer. */
    sb->sb_cc += nn;
    sb->sb_wptr += nn;
    Log2(("%s: update so_snd (readed nn = %d) %R[sbuf]\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, nn, sb));
    if (sb->sb_wptr >= (sb->sb_data + sb->sb_datalen))
    {
        /* Wrap the write pointer around the circular buffer. */
        sb->sb_wptr -= sb->sb_datalen;
        Log2(("%s: alter sb_wptr so_snd = %R[sbuf]\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, sb));
    }
    STAM_PROFILE_STOP(&pData->StatIOread, a);
    SOCKET_UNLOCK(so);
    return nn;
}
435
436/*
437 * Get urgent data
438 *
439 * When the socket is created, we set it SO_OOBINLINE,
440 * so when OOB data arrives, we soread() it and everything
441 * in the send buffer is sent as urgent data
442 */
443void
444sorecvoob(PNATState pData, struct socket *so)
445{
446 struct tcpcb *tp = sototcpcb(so);
447 ssize_t ret;
448
449 LogFlowFunc(("sorecvoob: so = %R[natsock]\n", so));
450
451 /*
452 * We take a guess at how much urgent data has arrived.
453 * In most situations, when urgent data arrives, the next
454 * read() should get all the urgent data. This guess will
455 * be wrong however if more data arrives just after the
456 * urgent data, or the read() doesn't return all the
457 * urgent data.
458 */
459 ret = soread(pData, so);
460 if (RT_LIKELY(ret > 0))
461 {
462 tp->snd_up = tp->snd_una + SBUF_LEN(&so->so_snd);
463 tp->t_force = 1;
464 tcp_output(pData, tp);
465 tp->t_force = 0;
466 }
467}
468
469/*
470 * Send urgent data
471 * There's a lot duplicated code here, but...
472 */
473int
474sosendoob(struct socket *so)
475{
476 struct sbuf *sb = &so->so_rcv;
477 char buff[2048]; /* XXX Shouldn't be sending more oob data than this */
478
479 int n, len;
480
481 LogFlowFunc(("sosendoob so = %R[natsock]\n", so));
482
483 if (so->so_urgc > sizeof(buff))
484 so->so_urgc = sizeof(buff); /* XXX */
485
486 if (sb->sb_rptr < sb->sb_wptr)
487 {
488 /* We can send it directly */
489 n = send(so->s, sb->sb_rptr, so->so_urgc, (MSG_OOB)); /* |MSG_DONTWAIT)); */
490 so->so_urgc -= n;
491
492 Log2((" --- sent %d bytes urgent data, %d urgent bytes left\n",
493 n, so->so_urgc));
494 }
495 else
496 {
497 /*
498 * Since there's no sendv or sendtov like writev,
499 * we must copy all data to a linear buffer then
500 * send it all
501 */
502 len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr;
503 if (len > so->so_urgc)
504 len = so->so_urgc;
505 memcpy(buff, sb->sb_rptr, len);
506 so->so_urgc -= len;
507 if (so->so_urgc)
508 {
509 n = sb->sb_wptr - sb->sb_data;
510 if (n > so->so_urgc)
511 n = so->so_urgc;
512 memcpy(buff + len, sb->sb_data, n);
513 so->so_urgc -= n;
514 len += n;
515 }
516 n = send(so->s, buff, len, (MSG_OOB)); /* |MSG_DONTWAIT)); */
517#ifdef DEBUG
518 if (n != len)
519 Log(("Didn't send all data urgently XXXXX\n"));
520#endif
521 Log2((" ---2 sent %d bytes urgent data, %d urgent bytes left\n",
522 n, so->so_urgc));
523 }
524
525 sb->sb_cc -= n;
526 sb->sb_rptr += n;
527 if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen))
528 sb->sb_rptr -= sb->sb_datalen;
529
530 return n;
531}
532
533/*
534 * Write data from so_rcv to so's socket,
535 * updating all sbuf field as necessary
536 */
537int
538sowrite(PNATState pData, struct socket *so)
539{
540 int n, nn;
541 struct sbuf *sb = &so->so_rcv;
542 u_int len = sb->sb_cc;
543 struct iovec iov[2];
544
545 STAM_PROFILE_START(&pData->StatIOwrite, a);
546 STAM_COUNTER_RESET(&pData->StatIOWrite_in_1);
547 STAM_COUNTER_RESET(&pData->StatIOWrite_in_1_bytes);
548 STAM_COUNTER_RESET(&pData->StatIOWrite_in_2);
549 STAM_COUNTER_RESET(&pData->StatIOWrite_in_2_1st_bytes);
550 STAM_COUNTER_RESET(&pData->StatIOWrite_in_2_2nd_bytes);
551 STAM_COUNTER_RESET(&pData->StatIOWrite_no_w);
552 STAM_COUNTER_RESET(&pData->StatIOWrite_rest);
553 STAM_COUNTER_RESET(&pData->StatIOWrite_rest_bytes);
554 LogFlowFunc(("so = %R[natsock]\n", so));
555 Log2(("%s: so = %R[natsock] so->so_rcv = %R[sbuf]\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, so, sb));
556 QSOCKET_LOCK(tcb);
557 SOCKET_LOCK(so);
558 QSOCKET_UNLOCK(tcb);
559 if (so->so_urgc)
560 {
561 sosendoob(so);
562 if (sb->sb_cc == 0)
563 {
564 SOCKET_UNLOCK(so);
565 STAM_PROFILE_STOP(&pData->StatIOwrite, a);
566 return 0;
567 }
568 }
569
570 /*
571 * No need to check if there's something to write,
572 * sowrite wouldn't have been called otherwise
573 */
574
575 len = sb->sb_cc;
576
577 iov[0].iov_base = sb->sb_rptr;
578 iov[1].iov_base = 0;
579 iov[1].iov_len = 0;
580 if (sb->sb_rptr < sb->sb_wptr)
581 {
582 iov[0].iov_len = sb->sb_wptr - sb->sb_rptr;
583 /* Should never succeed, but... */
584 if (iov[0].iov_len > len)
585 iov[0].iov_len = len;
586 n = 1;
587 }
588 else
589 {
590 iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr;
591 if (iov[0].iov_len > len)
592 iov[0].iov_len = len;
593 len -= iov[0].iov_len;
594 if (len)
595 {
596 iov[1].iov_base = sb->sb_data;
597 iov[1].iov_len = sb->sb_wptr - sb->sb_data;
598 if (iov[1].iov_len > len)
599 iov[1].iov_len = len;
600 n = 2;
601 }
602 else
603 n = 1;
604 }
605 STAM_STATS({
606 if (n == 1)
607 {
608 STAM_COUNTER_INC(&pData->StatIOWrite_in_1);
609 STAM_COUNTER_ADD(&pData->StatIOWrite_in_1_bytes, iov[0].iov_len);
610 }
611 else
612 {
613 STAM_COUNTER_INC(&pData->StatIOWrite_in_2);
614 STAM_COUNTER_ADD(&pData->StatIOWrite_in_2_1st_bytes, iov[0].iov_len);
615 STAM_COUNTER_ADD(&pData->StatIOWrite_in_2_2nd_bytes, iov[1].iov_len);
616 }
617 });
618 /* Check if there's urgent data to send, and if so, send it */
619#ifdef HAVE_READV
620 nn = writev(so->s, (const struct iovec *)iov, n);
621#else
622 nn = send(so->s, iov[0].iov_base, iov[0].iov_len, 0);
623#endif
624 Log2(("%s: wrote(1) nn = %d bytes\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, nn));
625 /* This should never happen, but people tell me it does *shrug* */
626 if ( nn < 0
627 && soIgnorableErrorCode(errno))
628 {
629 SOCKET_UNLOCK(so);
630 STAM_PROFILE_STOP(&pData->StatIOwrite, a);
631 return 0;
632 }
633
634 if (nn < 0 || (nn == 0 && iov[0].iov_len > 0))
635 {
636 Log2(("%s: disconnected, so->so_state = %x, errno = %d\n",
637 RT_GCC_EXTENSION __PRETTY_FUNCTION__, so->so_state, errno));
638 sofcantsendmore(so);
639 tcp_sockclosed(pData, sototcpcb(so));
640 SOCKET_UNLOCK(so);
641 STAM_PROFILE_STOP(&pData->StatIOwrite, a);
642 return -1;
643 }
644
645#ifndef HAVE_READV
646 if (n == 2 && (unsigned)nn == iov[0].iov_len)
647 {
648 int ret;
649 ret = send(so->s, iov[1].iov_base, iov[1].iov_len, 0);
650 if (ret > 0)
651 nn += ret;
652# ifdef VBOX_WITH_STATISTICS
653 if (ret > 0 && ret != (ssize_t)iov[1].iov_len)
654 {
655 STAM_COUNTER_INC(&pData->StatIOWrite_rest);
656 STAM_COUNTER_ADD(&pData->StatIOWrite_rest_bytes, (iov[1].iov_len - ret));
657 }
658#endif
659 }
660 Log2(("%s: wrote(2) nn = %d bytes\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, nn));
661#endif
662
663 /* Update sbuf */
664 sb->sb_cc -= nn;
665 sb->sb_rptr += nn;
666 Log2(("%s: update so_rcv (written nn = %d) %R[sbuf]\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, nn, sb));
667 if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen))
668 {
669 sb->sb_rptr -= sb->sb_datalen;
670 Log2(("%s: alter sb_rptr of so_rcv %R[sbuf]\n", RT_GCC_EXTENSION __PRETTY_FUNCTION__, sb));
671 }
672
673 /*
674 * If in DRAIN mode, and there's no more data, set
675 * it CANTSENDMORE
676 */
677 if ((so->so_state & SS_FWDRAIN) && sb->sb_cc == 0)
678 sofcantsendmore(so);
679
680 SOCKET_UNLOCK(so);
681 STAM_PROFILE_STOP(&pData->StatIOwrite, a);
682 return nn;
683}
684
685/*
686 * recvfrom() a UDP socket
687 */
/*
 * recvfrom() a UDP socket: receive one datagram from the host side and
 * forward it to the guest via udp_output().  On POSIX hosts, ICMP "ping"
 * replies take a separate path through sorecvfrom_icmp_unix().
 */
void
sorecvfrom(PNATState pData, struct socket *so)
{
    LogFlowFunc(("sorecvfrom: so = %p\n", so));

#ifdef RT_OS_WINDOWS
    /* ping is handled with ICMP API in ip_icmpwin.c */
    Assert(so->so_type == IPPROTO_UDP);
#else
    if (so->so_type == IPPROTO_ICMP)
    {
        /* This is a "ping" reply */
        sorecvfrom_icmp_unix(pData, so);
        udp_detach(pData, so);
    }
    else
#endif /* !RT_OS_WINDOWS */
    {
        /* Spill buffer for datagrams larger than the mbuf's trailing space.
         * NOTE(review): static -- assumes the NAT loop is single-threaded here;
         * confirm before introducing concurrency. */
        static char achBuf[64 * 1024];

        /* A "normal" UDP packet */
        struct sockaddr_in addr;
        socklen_t addrlen = sizeof(struct sockaddr_in);
        struct iovec iov[2];
        ssize_t nread;
        struct mbuf *m;

        QSOCKET_LOCK(udb);
        SOCKET_LOCK(so);
        QSOCKET_UNLOCK(udb);

        m = m_getjcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR, slirp_size(pData));
        if (m == NULL)
        {
            SOCKET_UNLOCK(so);
            return;
        }

        /* Reserve room in front of the payload for the headers udp_output adds. */
        m->m_data += ETH_HLEN;
        m->m_pkthdr.header = mtod(m, void *);

        m->m_data += sizeof(struct udpiphdr);

        /* small packets will fit without copying */
        iov[0].iov_base = mtod(m, char *);
        iov[0].iov_len = M_TRAILINGSPACE(m);

        /* large packets will spill into a temp buffer */
        iov[1].iov_base = achBuf;
        iov[1].iov_len = sizeof(achBuf);

#if !defined(RT_OS_WINDOWS)
        {
            struct msghdr mh;
            memset(&mh, 0, sizeof(mh));

            mh.msg_iov = iov;
            mh.msg_iovlen = 2;
            mh.msg_name = &addr;
            mh.msg_namelen = addrlen;

            nread = recvmsg(so->s, &mh, 0);
        }
#else /* RT_OS_WINDOWS */
        {
            DWORD nbytes; /* NB: can't use nread b/c of different size */
            DWORD flags = 0;
            int status;
            AssertCompile(sizeof(WSABUF) == sizeof(struct iovec));
            AssertCompileMembersSameSizeAndOffset(WSABUF, len, struct iovec, iov_len);
            AssertCompileMembersSameSizeAndOffset(WSABUF, buf, struct iovec, iov_base);
            status = WSARecvFrom(so->s, (WSABUF *)&iov[0], 2, &nbytes, &flags,
                                 (struct sockaddr *)&addr, &addrlen,
                                 NULL, NULL);
            if (status != SOCKET_ERROR)
                nread = nbytes;
            else
                nread = -1;
        }
#endif
        if (nread >= 0)
        {
            /* Signed/unsigned comparison is safe: nread is non-negative here. */
            if (nread <= iov[0].iov_len)
                m->m_len = nread;
            else
            {
                /* The datagram spilled over; append the overflow from achBuf. */
                m->m_len = iov[0].iov_len;
                m_append(pData, m, nread - iov[0].iov_len, iov[1].iov_base);
            }
            Assert(m_length(m, NULL) == (size_t)nread);

            /*
             * Hack: domain name lookup will be used the most for UDP,
             * and since they'll only be used once there's no need
             * for the 4 minute (or whatever) timeout... So we time them
             * out much quicker (10 seconds for now...)
             */
            if (so->so_expire)
            {
                if (so->so_fport != RT_H2N_U16_C(53))
                    so->so_expire = curtime + SO_EXPIRE;
            }

            /*
             * DNS proxy requests are forwarded to the real resolver,
             * but its socket's so_faddr is that of the DNS proxy
             * itself.
             *
             * last argument should be changed if Slirp will inject IP attributes
             */
            if (   pData->fUseDnsProxy
                && so->so_fport == RT_H2N_U16_C(53)
                && CTL_CHECK(so->so_faddr.s_addr, CTL_DNS))
                dnsproxy_answer(pData, so, m);

            /* packets definitely will be fragmented, could confuse receiver peer. */
            if (nread > if_mtu)
                m->m_flags |= M_SKIP_FIREWALL;

            /*
             * If this packet was destined for CTL_ADDR,
             * make it look like that's where it came from, done by udp_output
             */
            udp_output(pData, so, m, &addr);
        }
        else
        {
            /* Receive failed: drop the mbuf and translate the error to ICMP. */
            m_freem(pData, m);

            if (!soIgnorableErrorCode(errno))
            {
                u_char code;
                if (errno == EHOSTUNREACH)
                    code = ICMP_UNREACH_HOST;
                else if (errno == ENETUNREACH)
                    code = ICMP_UNREACH_NET;
                else
                    code = ICMP_UNREACH_PORT;

                Log2((" rx error, tx icmp ICMP_UNREACH:%i\n", code));
                icmp_error(pData, so->so_m, ICMP_UNREACH, code, 0, strerror(errno));
                so->so_m = NULL;
            }
        }

        SOCKET_UNLOCK(so);
    }
}
836
837/*
838 * sendto() a socket
839 */
/*
 * sendto() a socket: linearise the mbuf chain and forward the UDP
 * datagram to the host network, rewriting guest-network alias
 * destinations (CTL_*) to their real host-side equivalents.
 *
 * Returns 0 on success, -1 on allocation or send failure.
 */
int
sosendto(PNATState pData, struct socket *so, struct mbuf *m)
{
    int ret;
    struct sockaddr_in *paddr;
    struct sockaddr addr;
#if 0
    struct sockaddr_in host_addr;
#endif
    caddr_t buf = 0;
    int mlen;

    LogFlowFunc(("sosendto: so = %R[natsock], m = %p\n", so, m));

    memset(&addr, 0, sizeof(struct sockaddr));
#ifdef RT_OS_DARWIN
    addr.sa_len = sizeof(struct sockaddr_in);
#endif
    paddr = (struct sockaddr_in *)&addr;
    paddr->sin_family = AF_INET;
    /* Destination inside the guest's virtual network? Then it's an alias
     * that must be mapped to a real host-side address. */
    if ((so->so_faddr.s_addr & RT_H2N_U32(pData->netmask)) == pData->special_addr.s_addr)
    {
        /* It's an alias */
        uint32_t last_byte = RT_N2H_U32(so->so_faddr.s_addr) & ~pData->netmask;
        switch(last_byte)
        {
#if 0
            /* handle this case at 'default:' */
            case CTL_BROADCAST:
                addr.sin_addr.s_addr = INADDR_BROADCAST;
                /* Send the packet to host to fully emulate broadcast */
                /** @todo r=klaus: on Linux host this causes the host to receive
                 * the packet twice for some reason. And I cannot find any place
                 * in the man pages which states that sending a broadcast does not
                 * reach the host itself. */
                host_addr.sin_family = AF_INET;
                host_addr.sin_port = so->so_fport;
                host_addr.sin_addr = our_addr;
                sendto(so->s, m->m_data, m->m_len, 0,
                       (struct sockaddr *)&host_addr, sizeof (struct sockaddr));
                break;
#endif
            case CTL_DNS:
            case CTL_ALIAS:
            default:
                /* Guest-net broadcast maps to real broadcast; everything else
                 * (DNS, alias/gateway) is serviced by the host's loopback. */
                if (last_byte == ~pData->netmask)
                    paddr->sin_addr.s_addr = INADDR_BROADCAST;
                else
                    paddr->sin_addr = loopback_addr;
                break;
        }
    }
    else
        paddr->sin_addr = so->so_faddr;
    paddr->sin_port = so->so_fport;

    Log2((" sendto()ing, addr.sin_port=%d, addr.sin_addr.s_addr=%.16s\n",
          RT_N2H_U16(paddr->sin_port), inet_ntoa(paddr->sin_addr)));

    /* Don't care what port we get */
    /*
     * > nmap -sV -T4 -O -A -v -PU3483 255.255.255.255
     * generates bodyless messages, annoying memory management system.
     */
    mlen = m_length(m, NULL);
    if (mlen > 0)
    {
        /* Linearise the (possibly chained) mbuf into a flat buffer for sendto(). */
        buf = RTMemAlloc(mlen);
        if (buf == NULL)
        {
            return -1;
        }
        m_copydata(m, 0, mlen, buf);
    }
    ret = sendto(so->s, buf, mlen, 0,
                 (struct sockaddr *)&addr, sizeof (struct sockaddr));
#ifdef VBOX_WITH_NAT_SEND2HOME
    if (slirpIsWideCasting(pData, so->so_faddr.s_addr))
    {
        slirpSend2Home(pData, so, buf, mlen, 0);
    }
#endif
    if (buf)
        RTMemFree(buf);
    if (ret < 0)
    {
        Log2(("UDP: sendto fails (%s)\n", strerror(errno)));
        return -1;
    }

    /*
     * Kill the socket if there's no reply in 4 minutes,
     * but only if it's an expirable socket
     */
    if (so->so_expire)
        so->so_expire = curtime + SO_EXPIRE;
    so->so_state = SS_ISFCONNECTED; /* So that it gets select()ed */
    return 0;
}
939
940/*
941 * XXX This should really be tcp_listen
942 */
/*
 * XXX This should really be tcp_listen
 *
 * Create a listening host socket bound to bind_addr:port for a guest
 * connection identified by laddr:lport, insert it into the TCP queue
 * and return it (NULL on any failure; resources are unwound).
 */
struct socket *
solisten(PNATState pData, u_int32_t bind_addr, u_int port, u_int32_t laddr, u_int lport, int flags)
{
    struct sockaddr_in addr;
    struct socket *so;
    socklen_t addrlen = sizeof(addr);
    int s, opt = 1;
    int status;

    LogFlowFunc(("solisten: port = %d, laddr = %x, lport = %d, flags = %x\n", port, laddr, lport, flags));

    if ((so = socreate()) == NULL)
    {
        /* RTMemFree(so); Not sofree() ??? free(NULL) == NOP */
        return NULL;
    }

    /* Don't tcp_attach... we don't need so_snd nor so_rcv */
    if ((so->so_tcpcb = tcp_newtcpcb(pData, so)) == NULL)
    {
        RTMemFree(so);
        return NULL;
    }

    SOCKET_LOCK_CREATE(so);
    SOCKET_LOCK(so);
    QSOCKET_LOCK(tcb);
    insque(pData, so,&tcb);
    NSOCK_INC();
    QSOCKET_UNLOCK(tcb);

    /*
     * SS_FACCEPTONCE sockets must time out.
     */
    if (flags & SS_FACCEPTONCE)
        so->so_tcpcb->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT*2;

    so->so_state = (SS_FACCEPTCONN|flags);
    so->so_lport = lport; /* Kept in network format */
    so->so_laddr.s_addr = laddr; /* Ditto */

    memset(&addr, 0, sizeof(addr));
#ifdef RT_OS_DARWIN
    addr.sin_len = sizeof(addr);
#endif
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = bind_addr;
    addr.sin_port = port;

    /**
     * changing listen(,1->SOMAXCONN) shouldn't be harmful for NAT's TCP/IP stack,
     * kernel will choose the optimal value for requests queue length.
     * @note: MSDN recommends low (2-4) values for bluetooth networking devices.
     */
    if (   ((s = socket(AF_INET, SOCK_STREAM, 0)) < 0)
        || (setsockopt(s, SOL_SOCKET, SO_REUSEADDR,(char *)&opt, sizeof(int)) < 0)
        || (bind(s,(struct sockaddr *)&addr, sizeof(addr)) < 0)
        || (listen(s, pData->soMaxConn) < 0))
    {
        /* Unwind: close the host fd (if any) and release the NAT socket. */
#ifdef RT_OS_WINDOWS
        int tmperrno = WSAGetLastError(); /* Don't clobber the real reason we failed */
        closesocket(s);
        QSOCKET_LOCK(tcb);
        sofree(pData, so);
        QSOCKET_UNLOCK(tcb);
        /* Restore the real errno */
        WSASetLastError(tmperrno);
#else
        int tmperrno = errno; /* Don't clobber the real reason we failed */
        close(s);
        if (sototcpcb(so))
            tcp_close(pData, sototcpcb(so));
        else
            sofree(pData, so);
        /* Restore the real errno */
        errno = tmperrno;
#endif
        return NULL;
    }
    fd_nonblock(s);
    setsockopt(s, SOL_SOCKET, SO_OOBINLINE,(char *)&opt, sizeof(int));

    /* Learn the actual port the kernel assigned (port may have been 0). */
    getsockname(s,(struct sockaddr *)&addr,&addrlen);
    so->so_fport = addr.sin_port;
    /* set socket buffers; failures here are logged but non-fatal */
    opt = pData->socket_rcv;
    status = setsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&opt, sizeof(int));
    if (status < 0)
    {
        LogRel(("NAT: Error(%d) while setting RCV capacity to (%d)\n", errno, opt));
        goto no_sockopt;
    }
    opt = pData->socket_snd;
    status = setsockopt(s, SOL_SOCKET, SO_SNDBUF, (char *)&opt, sizeof(int));
    if (status < 0)
    {
        LogRel(("NAT: Error(%d) while setting SND capacity to (%d)\n", errno, opt));
        goto no_sockopt;
    }
no_sockopt:
    /* A wildcard/loopback bind presents itself to the guest as the alias address. */
    if (addr.sin_addr.s_addr == 0 || addr.sin_addr.s_addr == loopback_addr.s_addr)
        so->so_faddr = alias_addr;
    else
        so->so_faddr = addr.sin_addr;

    so->s = s;
    SOCKET_UNLOCK(so);
    return so;
}
1052
1053/*
1054 * Data is available in so_rcv
1055 * Just write() the data to the socket
1056 * XXX not yet...
1057 * @todo do we really need this function, what it's intended to do?
1058 */
void
sorwakeup(struct socket *so)
{
    /* Intentionally a no-op: writing happens from the main loop. */
    NOREF(so);
#if 0
    sowrite(so);
    FD_CLR(so->s,&writefds);
#endif
}
1068
1069/*
1070 * Data has been freed in so_snd
1071 * We have room for a read() if we want to
1072 * For now, don't read, it'll be done in the main loop
1073 */
void
sowwakeup(struct socket *so)
{
    /* Intentionally a no-op: reading is driven by the main loop. */
    NOREF(so);
}
1079
1080/*
1081 * Various session state calls
1082 * XXX Should be #define's
1083 * The socket state stuff needs work, these often get call 2 or 3
1084 * times each when only 1 was needed
1085 */
1086void
1087soisfconnecting(struct socket *so)
1088{
1089 so->so_state &= ~(SS_NOFDREF|SS_ISFCONNECTED|SS_FCANTRCVMORE|
1090 SS_FCANTSENDMORE|SS_FWDRAIN);
1091 so->so_state |= SS_ISFCONNECTING; /* Clobber other states */
1092}
1093
1094void
1095soisfconnected(struct socket *so)
1096{
1097 LogFlowFunc(("ENTER: so:%R[natsock]\n", so));
1098 so->so_state &= ~(SS_ISFCONNECTING|SS_FWDRAIN|SS_NOFDREF);
1099 so->so_state |= SS_ISFCONNECTED; /* Clobber other states */
1100 LogFlowFunc(("LEAVE: so:%R[natsock]\n", so));
1101}
1102
1103void
1104sofcantrcvmore(struct socket *so)
1105{
1106 LogFlowFunc(("ENTER: so:%R[natsock]\n", so));
1107 if ((so->so_state & SS_NOFDREF) == 0)
1108 {
1109 shutdown(so->s, 0);
1110 }
1111 so->so_state &= ~(SS_ISFCONNECTING);
1112 if (so->so_state & SS_FCANTSENDMORE)
1113 so->so_state = SS_NOFDREF; /* Don't select it */
1114 /* XXX close() here as well? */
1115 else
1116 so->so_state |= SS_FCANTRCVMORE;
1117 LogFlowFuncLeave();
1118}
1119
1120void
1121sofcantsendmore(struct socket *so)
1122{
1123 LogFlowFunc(("ENTER: so:%R[natsock]\n", so));
1124 if ((so->so_state & SS_NOFDREF) == 0)
1125 shutdown(so->s, 1); /* send FIN to fhost */
1126
1127 so->so_state &= ~(SS_ISFCONNECTING);
1128 if (so->so_state & SS_FCANTRCVMORE)
1129 so->so_state = SS_NOFDREF; /* as above */
1130 else
1131 so->so_state |= SS_FCANTSENDMORE;
1132 LogFlowFuncLeave();
1133}
1134
/*
 * Mark the socket as disconnected from the foreign host.
 * Currently a stub; the intended transition is kept below, disabled.
 */
void
soisfdisconnected(struct socket *so)
{
    NOREF(so);
#if 0
    so->so_state &= ~(SS_ISFCONNECTING|SS_ISFCONNECTED);
    close(so->s);
    so->so_state = SS_ISFDISCONNECTED;
    /*
     * XXX Do nothing ... ?
     */
#endif
}
1148
1149/*
1150 * Set write drain mode
1151 * Set CANTSENDMORE once all data has been write()n
1152 */
1153void
1154sofwdrain(struct socket *so)
1155{
1156 if (SBUF_LEN(&so->so_rcv))
1157 so->so_state |= SS_FWDRAIN;
1158 else
1159 sofcantsendmore(so);
1160}
1161
1162#if !defined(RT_OS_WINDOWS)
/**
 * Translate an ICMP datagram received on the host side into a reply for
 * the guest and inject it with icmp_reflect().
 *
 * Accepts ICMP_ECHOREPLY plus the error messages ICMP_TIMXCEED and
 * ICMP_UNREACH; for the latter two the IP header embedded in the ICMP
 * payload is used to look up the original guest request.
 *
 * @param pData     the NAT instance data.
 * @param buff      raw datagram starting with the (host) IP header.
 * @param len       number of valid bytes in @a buff.
 * @param addr      source address the datagram was received from.
 */
static void
send_icmp_to_guest(PNATState pData, char *buff, size_t len, const struct sockaddr_in *addr)
{
    struct ip *ip;
    uint32_t dst, src;
    char ip_copy[256]; /* scratch copy of the original IP header + 64 bits of its payload */
    struct icmp *icp;
    int old_ip_len = 0;
    int hlen, original_hlen = 0;
    struct mbuf *m;
    struct icmp_msg *icm;
    uint8_t proto; /* NOTE(review): assigned below but otherwise unused. */
    int type = 0;

    ip = (struct ip *)buff;
    /* Fix ip->ip_len to contain the total packet length including the header
     * in _host_ byte order for all OSes. On Darwin, that value already is in
     * host byte order. Solaris and Darwin report only the payload. */
#ifndef RT_OS_DARWIN
    ip->ip_len = RT_N2H_U16(ip->ip_len);
#endif
    hlen = (ip->ip_hl << 2);
#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
    ip->ip_len += hlen;
#endif
    if (ip->ip_len < hlen + ICMP_MINLEN)
    {
        Log(("send_icmp_to_guest: ICMP header is too small to understand which type/subtype of the datagram\n"));
        return;
    }
    icp = (struct icmp *)((char *)ip + hlen);

    Log(("ICMP:received msg(t:%d, c:%d)\n", icp->icmp_type, icp->icmp_code));
    /* Only these three types are translated back to the guest. */
    if (   icp->icmp_type != ICMP_ECHOREPLY
        && icp->icmp_type != ICMP_TIMXCEED
        && icp->icmp_type != ICMP_UNREACH)
    {
        return;
    }

    /*
     * ICMP_ECHOREPLY, ICMP_TIMXCEED, ICMP_UNREACH minimal header size is
     * that of ICMP_ECHOREPLY assuming 0 bytes of data:
     * icmp_{type(8), code(8), cksum(16), identifier(16), seqnum(16)}
     */
    if (ip->ip_len < hlen + 8)
    {
        Log(("send_icmp_to_guest: NAT accept ICMP_{ECHOREPLY, TIMXCEED, UNREACH} the minimum size is 64 (see rfc792)\n"));
        return;
    }

    type = icp->icmp_type;
    if (   type == ICMP_TIMXCEED
        || type == ICMP_UNREACH)
    {
        /*
         * ICMP_TIMXCEED, ICMP_UNREACH minimal header size is
         * icmp_{type(8), code(8), cksum(16), unused(32)} + IP header + 64 bits
         * of the original datagram.
         */
        if (ip->ip_len < hlen + 2*8 + sizeof(struct ip))
        {
            Log(("send_icmp_to_guest: NAT accept ICMP_{TIMXCEED, UNREACH} the minimum size of ipheader + 64 bit of data (see rfc792)\n"));
            return;
        }
        /* For error messages, match on the embedded original IP header. */
        ip = &icp->icmp_ip;
    }

    icm = icmp_find_original_mbuf(pData, ip);
    if (icm == NULL)
    {
        Log(("NAT: Can't find the corresponding packet for the received ICMP\n"));
        return;
    }

    m = icm->im_m;
    if (!m)
    {
        LogFunc(("%R[natsock] hasn't stored it's mbuf on sent\n", icm->im_so));
        goto done;
    }

    src = addr->sin_addr.s_addr;
    if (type == ICMP_ECHOREPLY)
    {
        struct ip *ip0 = mtod(m, struct ip *);
        struct icmp *icp0 = (struct icmp *)((char *)ip0 + (ip0->ip_hl << 2));
        if (icp0->icmp_type != ICMP_ECHO)
        {
            Log(("NAT: we haven't found echo for this reply\n"));
            goto done;
        }
        /*
         * While combining the buffer to send (see ip_icmp.c) we control the ICMP
         * header only; the IP header is combined by the OS network stack.  Our
         * local copy of the IP header contains values in host byte order, so no
         * byte order conversion is required here.  IP header fields are converted
         * in the ip_output0 routine only.
         */
        if (   (ip->ip_len - hlen)
            != (ip0->ip_len - (ip0->ip_hl << 2)))
        {
            Log(("NAT: ECHO(%d) lenght doesn't match ECHOREPLY(%d)\n",
                 (ip->ip_len - hlen), (ip0->ip_len - (ip0->ip_hl << 2))));
            goto done;
        }
    }

    /* ip now points at the original IP header, i.e. the one we sent from the guest */
    ip = mtod(m, struct ip *);
    proto = ip->ip_p;
    if (   icp->icmp_type == ICMP_TIMXCEED
        || icp->icmp_type == ICMP_UNREACH)
    {
        /* Save the original IP header + 64 bytes, clamped to the scratch
         * buffer size, before the mbuf tail is overwritten below. */
        old_ip_len = (ip->ip_hl << 2) + 64;
        if (old_ip_len > sizeof(ip_copy))
            old_ip_len = sizeof(ip_copy);
        memcpy(ip_copy, ip, old_ip_len);
    }

    /* source address of the original IP packet becomes the reply destination */
    dst = ip->ip_src.s_addr;

    /* override the tail of the old packet */
    ip = mtod(m, struct ip *); /* ip is from the mbuf we've overridden */
    original_hlen = ip->ip_hl << 2;
    /* keeps the original IP header and options, appends the received ICMP payload */
    m_copyback(pData, m, original_hlen, len - hlen, buff + hlen);
    ip->ip_len = m_length(m, NULL);
    ip->ip_p = IPPROTO_ICMP; /* the original packet could be whatever, but we respond via ICMP */

    icp = (struct icmp *)((char *)ip + (ip->ip_hl << 2));
    type = icp->icmp_type;
    if (   type == ICMP_TIMXCEED
        || type == ICMP_UNREACH)
    {
        /* according to RFC 792, error messages carry a copy of the initial IP header + 64 bits */
        memcpy(&icp->icmp_ip, ip_copy, old_ip_len);

        /* undo byte order conversions done in ip_input() */
        HTONS(icp->icmp_ip.ip_len);
        HTONS(icp->icmp_ip.ip_id);
        HTONS(icp->icmp_ip.ip_off);

        ip->ip_tos = ((ip->ip_tos & 0x1E) | 0xC0); /* high priority for errors */
    }

    ip->ip_src.s_addr = src;
    ip->ip_dst.s_addr = dst;
    icmp_reflect(pData, m);
    /* m was freed (by icmp_reflect) */
    icm->im_m = NULL;

  done:
    icmp_msg_delete(pData, icm);
}
1318
1319static void sorecvfrom_icmp_unix(PNATState pData, struct socket *so)
1320{
1321 struct sockaddr_in addr;
1322 socklen_t addrlen = sizeof(struct sockaddr_in);
1323 struct ip ip;
1324 char *buff;
1325 int len = 0;
1326
1327 /* 1- step: read the ip header */
1328 len = recvfrom(so->s, &ip, sizeof(struct ip), MSG_PEEK,
1329 (struct sockaddr *)&addr, &addrlen);
1330 if ( len < 0
1331 && ( soIgnorableErrorCode(errno)
1332 || errno == ENOTCONN))
1333 {
1334 Log(("sorecvfrom_icmp_unix: 1 - step can't read IP datagramm (would block)\n"));
1335 return;
1336 }
1337
1338 if ( len < sizeof(struct ip)
1339 || len < 0
1340 || len == 0)
1341 {
1342 u_char code;
1343 code = ICMP_UNREACH_PORT;
1344
1345 if (errno == EHOSTUNREACH)
1346 code = ICMP_UNREACH_HOST;
1347 else if (errno == ENETUNREACH)
1348 code = ICMP_UNREACH_NET;
1349
1350 LogRel(("NAT: UDP ICMP rx errno=%d (%s)\n", errno, strerror(errno)));
1351 icmp_error(pData, so->so_m, ICMP_UNREACH, code, 0, strerror(errno));
1352 so->so_m = NULL;
1353 Log(("sorecvfrom_icmp_unix: 1 - step can't read IP datagramm\n"));
1354 return;
1355 }
1356 /* basic check of IP header */
1357 if ( ip.ip_v != IPVERSION
1358# ifndef RT_OS_DARWIN
1359 || ip.ip_p != IPPROTO_ICMP
1360# endif
1361 )
1362 {
1363 Log(("sorecvfrom_icmp_unix: 1 - step IP isn't IPv4\n"));
1364 return;
1365 }
1366# ifndef RT_OS_DARWIN
1367 /* Darwin reports the IP length already in host byte order. */
1368 ip.ip_len = RT_N2H_U16(ip.ip_len);
1369# endif
1370# if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
1371 /* Solaris and Darwin report the payload only */
1372 ip.ip_len += (ip.ip_hl << 2);
1373# endif
1374 /* Note: ip->ip_len in host byte order (all OS) */
1375 len = ip.ip_len;
1376 buff = RTMemAlloc(len);
1377 if (buff == NULL)
1378 {
1379 Log(("sorecvfrom_icmp_unix: 1 - step can't allocate enought room for datagram\n"));
1380 return;
1381 }
1382 /* 2 - step: we're reading rest of the datagramm to the buffer */
1383 addrlen = sizeof(struct sockaddr_in);
1384 memset(&addr, 0, addrlen);
1385 len = recvfrom(so->s, buff, len, 0,
1386 (struct sockaddr *)&addr, &addrlen);
1387 if ( len < 0
1388 && ( soIgnorableErrorCode(errno)
1389 || errno == ENOTCONN))
1390 {
1391 Log(("sorecvfrom_icmp_unix: 2 - step can't read IP body (would block expected:%d)\n",
1392 ip.ip_len));
1393 RTMemFree(buff);
1394 return;
1395 }
1396 if ( len < 0
1397 || len == 0)
1398 {
1399 Log(("sorecvfrom_icmp_unix: 2 - step read of the rest of datagramm is fallen (errno:%d, len:%d expected: %d)\n",
1400 errno, len, (ip.ip_len - sizeof(struct ip))));
1401 RTMemFree(buff);
1402 return;
1403 }
1404 /* len is modified in 2nd read, when the rest of the datagramm was read */
1405 send_icmp_to_guest(pData, buff, len, &addr);
1406 RTMemFree(buff);
1407}
1408#endif /* !RT_OS_WINDOWS */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette