VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/slirp.c@40284

Last change on this file since 40284 was 40284, checked in by vboxsync, 13 years ago

NAT: htip->tip (CTL_CHECK takes its parameter in network byte order). The switch
selection repeats the condition of the branch.
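For context, the change refers to the ARP handling in arp_input() further down: CTL_CHECK() expects its address argument in network byte order, so the raw tip value taken from the ARP header is passed instead of the host-order htip. Below is a minimal sketch of why the byte order matters; it is illustrative only (is_ctl_addr is a made-up name, not the real CTL_CHECK from slirp.h, and it assumes the IPRT byte-order macros already used throughout this file):

    /* Illustrative only -- not the actual CTL_CHECK() macro. */
    static int is_ctl_addr(uint32_t tip_net, uint32_t net_base_host, uint8_t ctl)
    {
        /* tip_net comes straight from the wire (network byte order); convert it
         * once before comparing against a host-order network base + service id. */
        return RT_N2H_U32(tip_net) == (net_base_host | ctl);
    }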

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 72.0 KB
1/* $Id: slirp.c 40284 2012-02-29 02:35:55Z vboxsync $ */
2/** @file
3 * NAT - slirp glue.
4 */
5
6/*
7 * Copyright (C) 2006-2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * libslirp glue
22 *
23 * Copyright (c) 2004-2008 Fabrice Bellard
24 *
25 * Permission is hereby granted, free of charge, to any person obtaining a copy
26 * of this software and associated documentation files (the "Software"), to deal
27 * in the Software without restriction, including without limitation the rights
28 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
29 * copies of the Software, and to permit persons to whom the Software is
30 * furnished to do so, subject to the following conditions:
31 *
32 * The above copyright notice and this permission notice shall be included in
33 * all copies or substantial portions of the Software.
34 *
35 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
38 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
39 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
40 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
41 * THE SOFTWARE.
42 */
43
44#include "slirp.h"
45#ifdef RT_OS_OS2
46# include <paths.h>
47#endif
48
49#include <VBox/err.h>
50#include <VBox/vmm/pdmdrv.h>
51#include <iprt/assert.h>
52#include <iprt/file.h>
53#ifndef RT_OS_WINDOWS
54# include <sys/ioctl.h>
55# include <poll.h>
56# include <netinet/in.h>
57#else
58# include <Winnls.h>
59# define _WINSOCK2API_
60# include <IPHlpApi.h>
61#endif
62#include <alias.h>
63
64#ifndef RT_OS_WINDOWS
65
66# define DO_ENGAGE_EVENT1(so, fdset, label) \
67 do { \
68 if ( so->so_poll_index != -1 \
69 && so->s == polls[so->so_poll_index].fd) \
70 { \
71 polls[so->so_poll_index].events |= N_(fdset ## _poll); \
72 break; \
73 } \
74 AssertRelease(poll_index < (nfds)); \
75 AssertRelease(poll_index >= 0 && poll_index < (nfds)); \
76 polls[poll_index].fd = (so)->s; \
77 (so)->so_poll_index = poll_index; \
78 polls[poll_index].events = N_(fdset ## _poll); \
79 polls[poll_index].revents = 0; \
80 poll_index++; \
81 } while (0)
82
83# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
84 do { \
85 if ( so->so_poll_index != -1 \
86 && so->s == polls[so->so_poll_index].fd) \
87 { \
88 polls[so->so_poll_index].events |= \
89 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
90 break; \
91 } \
92 AssertRelease(poll_index < (nfds)); \
93 polls[poll_index].fd = (so)->s; \
94 (so)->so_poll_index = poll_index; \
95 polls[poll_index].events = \
96 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
97 poll_index++; \
98 } while (0)
99
100# define DO_POLL_EVENTS(rc, error, so, events, label) do {} while (0)
101
102/*
103 * DO_CHECK_FD_SET is used when dumping events on a socket, including POLLNVAL.
104 * GCC warns about attempts to log POLLNVAL, so the construct in the last two
105 * lines is used to catch POLLNVAL while logging and to return false on error
106 * during normal usage.
107 */
108# define DO_CHECK_FD_SET(so, events, fdset) \
109 ( ((so)->so_poll_index != -1) \
110 && ((so)->so_poll_index <= ndfs) \
111 && ((so)->s == polls[so->so_poll_index].fd) \
112 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)) \
113 && ( N_(fdset ## _poll) == POLLNVAL \
114 || !(polls[(so)->so_poll_index].revents & POLLNVAL)))
115
116 /* specific for Unix API */
117# define DO_UNIX_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
118 /* specific for Windows Winsock API */
119# define DO_WIN_CHECK_FD_SET(so, events, fdset) 0
120
121# ifndef RT_OS_LINUX
122# define readfds_poll (POLLRDNORM)
123# define writefds_poll (POLLWRNORM)
124# else
125# define readfds_poll (POLLIN)
126# define writefds_poll (POLLOUT)
127# endif
128# define xfds_poll (POLLPRI)
129# define closefds_poll (POLLHUP)
130# define rderr_poll (POLLERR)
131# define rdhup_poll (POLLHUP)
132# define nval_poll (POLLNVAL)
133
134# define ICMP_ENGAGE_EVENT(so, fdset) \
135 do { \
136 if (pData->icmp_socket.s != -1) \
137 DO_ENGAGE_EVENT1((so), fdset, ICMP); \
138 } while (0)
139
140#else /* RT_OS_WINDOWS */
141
142/*
143 * On Windows, we will be notified by IcmpSendEcho2() when the response arrives.
144 * So no call to WSAEventSelect necessary.
145 */
146# define ICMP_ENGAGE_EVENT(so, fdset) do {} while (0)
147
148/*
149 * On Windows we use FD_ALL_EVENTS to ensure that we don't miss any event.
150 */
151# define DO_ENGAGE_EVENT1(so, fdset1, label) \
152 do { \
153 rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS); \
154 if (rc == SOCKET_ERROR) \
155 { \
156 /* This should not happen */ \
157 error = WSAGetLastError(); \
158 LogRel(("WSAEventSelect (" #label ") error %d (so=%x, socket=%s, event=%x)\n", \
159 error, (so), (so)->s, VBOX_SOCKET_EVENT)); \
160 } \
161 } while (0); \
162 CONTINUE(label)
163
164# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
165 DO_ENGAGE_EVENT1((so), (fdset1), label)
166
167# define DO_POLL_EVENTS(rc, error, so, events, label) \
168 (rc) = WSAEnumNetworkEvents((so)->s, VBOX_SOCKET_EVENT, (events)); \
169 if ((rc) == SOCKET_ERROR) \
170 { \
171 (error) = WSAGetLastError(); \
172 LogRel(("WSAEnumNetworkEvents " #label " error %d\n", (error))); \
173 CONTINUE(label); \
174 }
175
176# define acceptds_win FD_ACCEPT
177# define acceptds_win_bit FD_ACCEPT_BIT
178# define readfds_win FD_READ
179# define readfds_win_bit FD_READ_BIT
180# define writefds_win FD_WRITE
181# define writefds_win_bit FD_WRITE_BIT
182# define xfds_win FD_OOB
183# define xfds_win_bit FD_OOB_BIT
184# define closefds_win FD_CLOSE
185# define closefds_win_bit FD_CLOSE_BIT
186# define connectfds_win FD_CONNECT
187# define connectfds_win_bit FD_CONNECT_BIT
188
192# define DO_CHECK_FD_SET(so, events, fdset) \
193 (((events).lNetworkEvents & fdset ## _win) && ((events).iErrorCode[fdset ## _win_bit] == 0))
194
195# define DO_WIN_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
196# define DO_UNIX_CHECK_FD_SET(so, events, fdset) 1 /*specific for Unix API */
197
198#endif /* RT_OS_WINDOWS */
199
200#define TCP_ENGAGE_EVENT1(so, fdset) \
201 DO_ENGAGE_EVENT1((so), fdset, tcp)
202
203#define TCP_ENGAGE_EVENT2(so, fdset1, fdset2) \
204 DO_ENGAGE_EVENT2((so), fdset1, fdset2, tcp)
205
206#ifdef RT_OS_WINDOWS
207# define WIN_TCP_ENGAGE_EVENT2(so, fdset1, fdset2) TCP_ENGAGE_EVENT2(so, fdset1, fdset2)
208#else
209# define WIN_TCP_ENGAGE_EVENT2(so, fdset, fdset2) do{}while(0)
210#endif
211
212#define UDP_ENGAGE_EVENT(so, fdset) \
213 DO_ENGAGE_EVENT1((so), fdset, udp)
214
215#define POLL_TCP_EVENTS(rc, error, so, events) \
216 DO_POLL_EVENTS((rc), (error), (so), (events), tcp)
217
218#define POLL_UDP_EVENTS(rc, error, so, events) \
219 DO_POLL_EVENTS((rc), (error), (so), (events), udp)
220
221#define CHECK_FD_SET(so, events, set) \
222 (DO_CHECK_FD_SET((so), (events), set))
223
224#define WIN_CHECK_FD_SET(so, events, set) \
225 (DO_WIN_CHECK_FD_SET((so), (events), set))
226
227#define UNIX_CHECK_FD_SET(so, events, set) \
228 (DO_UNIX_CHECK_FD_SET(so, events, set))
229
230/*
231 * Logging macros
232 */
233#if VBOX_WITH_DEBUG_NAT_SOCKETS
234# if defined(RT_OS_WINDOWS)
235# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
236 do { \
237 LogRel((" " #proto " %R[natsock] %R[natwinnetevents]\n", (so), (winevent))); \
238 } while (0)
239# else /* !RT_OS_WINDOWS */
240# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
241 do { \
242 LogRel((" " #proto " %R[natsock] %s %s %s er: %s, %s, %s\n", (so), \
243 CHECK_FD_SET(so, ign, r_fdset) ? "READ":"", \
244 CHECK_FD_SET(so, ign, w_fdset) ? "WRITE":"", \
245 CHECK_FD_SET(so, ign, x_fdset) ? "OOB":"", \
246 CHECK_FD_SET(so, ign, rderr) ? "RDERR":"", \
247 CHECK_FD_SET(so, ign, rdhup) ? "RDHUP":"", \
248 CHECK_FD_SET(so, ign, nval) ? "RDNVAL":"")); \
249 } while (0)
250# endif /* !RT_OS_WINDOWS */
251#else /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
252# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) do {} while (0)
253#endif /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
254
255#define LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
256 DO_LOG_NAT_SOCK((so), proto, (winevent), r_fdset, w_fdset, x_fdset)
257
258static void activate_port_forwarding(PNATState, const uint8_t *pEther);
259
260static const uint8_t special_ethaddr[6] =
261{
262 0x52, 0x54, 0x00, 0x12, 0x35, 0x00
263};
264
265static const uint8_t broadcast_ethaddr[6] =
266{
267 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
268};
269
270const uint8_t zerro_ethaddr[6] =
271{
272 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
273};
274
275#ifdef RT_OS_WINDOWS
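/**
 * Queries GetAdaptersAddresses() (via pfGetAdaptersAddresses) for the DNS servers and
 * DNS suffixes of every interface that is up, and adds them to the NAT DNS server and
 * search-domain lists. Loopback DNS servers are remapped to the CTL_ALIAS address.
 */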
276static int get_dns_addr_domain(PNATState pData,
277 const char **ppszDomain)
278{
279 ULONG flags = GAA_FLAG_INCLUDE_PREFIX; /*GAA_FLAG_INCLUDE_ALL_INTERFACES;*/ /* all interfaces registered in NDIS */
280 PIP_ADAPTER_ADDRESSES pAdapterAddr = NULL;
281 PIP_ADAPTER_ADDRESSES pAddr = NULL;
282 PIP_ADAPTER_DNS_SERVER_ADDRESS pDnsAddr = NULL;
283 ULONG size;
284 int wlen = 0;
285 char *pszSuffix;
286 struct dns_domain_entry *pDomain = NULL;
287 ULONG ret = ERROR_SUCCESS;
288
289 /* @todo add GAA_FLAG_SKIP_* flags to fetch only the required information */
290
291 /* determine size of buffer */
292 size = 0;
293 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
294 if (ret != ERROR_BUFFER_OVERFLOW)
295 {
296 Log(("NAT: error %lu occurred while querying the required buffer size\n", ret));
297 return -1;
298 }
299 if (size == 0)
300 {
301 Log(("NAT: Windows API returned a zero buffer size\n"));
302 return -1;
303 }
304
305 pAdapterAddr = RTMemAllocZ(size);
306 if (!pAdapterAddr)
307 {
308 Log(("NAT: No memory available\n"));
309 return -1;
310 }
311 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
312 if (ret != ERROR_SUCCESS)
313 {
314 Log(("NAT: error %lu occurred on fetching adapters info\n", ret));
315 RTMemFree(pAdapterAddr);
316 return -1;
317 }
318
319 for (pAddr = pAdapterAddr; pAddr != NULL; pAddr = pAddr->Next)
320 {
321 int found;
322 if (pAddr->OperStatus != IfOperStatusUp)
323 continue;
324
325 for (pDnsAddr = pAddr->FirstDnsServerAddress; pDnsAddr != NULL; pDnsAddr = pDnsAddr->Next)
326 {
327 struct sockaddr *SockAddr = pDnsAddr->Address.lpSockaddr;
328 struct in_addr InAddr;
329 struct dns_entry *pDns;
330
331 if (SockAddr->sa_family != AF_INET)
332 continue;
333
334 InAddr = ((struct sockaddr_in *)SockAddr)->sin_addr;
335
336 /* add dns server to list */
337 pDns = RTMemAllocZ(sizeof(struct dns_entry));
338 if (!pDns)
339 {
340 Log(("NAT: Can't allocate buffer for DNS entry\n"));
341 RTMemFree(pAdapterAddr);
342 return VERR_NO_MEMORY;
343 }
344
345 Log(("NAT: adding %RTnaipv4 to DNS server list\n", InAddr));
346 if ((InAddr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
347 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
348 else
349 pDns->de_addr.s_addr = InAddr.s_addr;
350
351 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
352
353 if (pAddr->DnsSuffix == NULL)
354 continue;
355
356 /* only add unique domain suffixes */
357 RTUtf16ToUtf8(pAddr->DnsSuffix, &pszSuffix);
358 if (!pszSuffix || strlen(pszSuffix) == 0)
359 {
360 RTStrFree(pszSuffix);
361 continue;
362 }
363
364 found = 0;
365 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
366 {
367 if ( pDomain->dd_pszDomain != NULL
368 && strcmp(pDomain->dd_pszDomain, pszSuffix) == 0)
369 {
370 found = 1;
371 RTStrFree(pszSuffix);
372 break;
373 }
374 }
375 if (!found)
376 {
377 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
378 if (!pDomain)
379 {
380 Log(("NAT: not enough memory\n"));
381 RTStrFree(pszSuffix);
382 RTMemFree(pAdapterAddr);
383 return VERR_NO_MEMORY;
384 }
385 pDomain->dd_pszDomain = pszSuffix;
386 Log(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
387 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
388 }
389 }
390 }
391 RTMemFree(pAdapterAddr);
392 return 0;
393}
394
395#else /* !RT_OS_WINDOWS */
396
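/**
 * Reads one text line (terminated by CR or LF) from @a File into @a pvBuf.
 * Returns VINF_SUCCESS on a complete line and VERR_EOF at end of file.
 */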
397static int RTFileGets(RTFILE File, void *pvBuf, size_t cbBufSize, size_t *pcbRead)
398{
399 size_t cbRead;
400 char bTest;
401 int rc = VERR_NO_MEMORY;
402 char *pu8Buf = (char *)pvBuf;
403 *pcbRead = 0;
404
405 while ( RT_SUCCESS(rc = RTFileRead(File, &bTest, 1, &cbRead))
406 && (pu8Buf - (char *)pvBuf) < cbBufSize)
407 {
408 if (cbRead == 0)
409 return VERR_EOF;
410
411 if (bTest == '\r' || bTest == '\n')
412 {
413 *pu8Buf = 0;
414 return VINF_SUCCESS;
415 }
416 *pu8Buf = bTest;
417 pu8Buf++;
418 (*pcbRead)++;
419 }
420 return rc;
421}
422
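/**
 * Parses resolv.conf (or the OS/2 equivalents) and fills the NAT DNS server and
 * search-domain lists; at most four nameservers are taken, and loopback servers
 * are remapped to the CTL_ALIAS address. Returns 0 on success, -1 if the file
 * could not be opened or no nameserver was found.
 */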
423static int get_dns_addr_domain(PNATState pData, const char **ppszDomain)
424{
425 char buff[512];
426 char buff2[256];
427 RTFILE f;
428 int cNameserversFound = 0;
429 bool fWarnTooManyDnsServers = false;
430 struct in_addr tmp_addr;
431 int rc = VERR_FILE_NOT_FOUND; /* keep rc defined if the OS/2 $ETC probe is skipped */
432 size_t bytes;
433
434# ifdef RT_OS_OS2
435 /* Try various locations. */
436 char *etc = getenv("ETC");
437 if (etc)
438 {
439 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", etc);
440 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
441 }
442 if (RT_FAILURE(rc))
443 {
444 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", _PATH_ETC);
445 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
446 }
447 if (RT_FAILURE(rc))
448 {
449 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", _PATH_ETC);
450 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
451 }
452# else /* !RT_OS_OS2 */
453# ifndef DEBUG_vvl
454 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
455# else
456 char *home = getenv("HOME");
457 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", home);
458 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
459 if (RT_SUCCESS(rc))
460 {
461 Log(("NAT: DNS we're using %s\n", buff));
462 }
463 else
464 {
465 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
466 Log(("NAT: DNS we're using %s\n", buff));
467 }
468# endif
469# endif /* !RT_OS_OS2 */
470 if (RT_FAILURE(rc))
471 return -1;
472
473 if (ppszDomain)
474 *ppszDomain = NULL;
475
476 Log(("NAT: DNS Servers:\n"));
477 while ( RT_SUCCESS(rc = RTFileGets(f, buff, sizeof(buff), &bytes))
478 && rc != VERR_EOF)
479 {
480 struct dns_entry *pDns = NULL;
481 if ( cNameserversFound == 4
482 && !fWarnTooManyDnsServers
483 && sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1)
484 {
485 fWarnTooManyDnsServers = true;
486 LogRel(("NAT: too many nameservers registered.\n"));
487 }
488 if ( sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1
489 && cNameserversFound < 4) /* Unix doesn't accept more than 4 name servers*/
490 {
491 if (!inet_aton(buff2, &tmp_addr))
492 continue;
493
494 /* localhost mask */
495 pDns = RTMemAllocZ(sizeof (struct dns_entry));
496 if (!pDns)
497 {
498 Log(("NAT: can't allocate memory for DNS entry\n"));
499 return -1;
500 }
501
502 /* check */
503 pDns->de_addr.s_addr = tmp_addr.s_addr;
504 if ((pDns->de_addr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
505 {
506 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
507 }
508 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
509 cNameserversFound++;
510 }
511 if ((!strncmp(buff, "domain", 6) || !strncmp(buff, "search", 6)))
512 {
513 char *tok;
514 char *saveptr;
515 struct dns_domain_entry *pDomain = NULL;
516 int fFoundDomain = 0;
517 tok = strtok_r(&buff[6], " \t\n", &saveptr);
518 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
519 {
520 if ( tok != NULL
521 && strcmp(tok, pDomain->dd_pszDomain) == 0)
522 {
523 fFoundDomain = 1;
524 break;
525 }
526 }
527 if (tok != NULL && !fFoundDomain)
528 {
529 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
530 if (!pDomain)
531 {
532 Log(("NAT: not enough memory to add the domain to the search list\n"));
533 return VERR_NO_MEMORY;
534 }
535 pDomain->dd_pszDomain = RTStrDup(tok);
536 Log(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
537 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
538 }
539 }
540 }
541 RTFileClose(f);
542 if (!cNameserversFound)
543 return -1;
544 return 0;
545}
546
547#endif /* !RT_OS_WINDOWS */
548
549int slirp_init_dns_list(PNATState pData)
550{
551 TAILQ_INIT(&pData->pDnsList);
552 LIST_INIT(&pData->pDomainList);
553 return get_dns_addr_domain(pData, NULL);
554}
555
556void slirp_release_dns_list(PNATState pData)
557{
558 struct dns_entry *pDns = NULL;
559 struct dns_domain_entry *pDomain = NULL;
560
561 while (!TAILQ_EMPTY(&pData->pDnsList))
562 {
563 pDns = TAILQ_FIRST(&pData->pDnsList);
564 TAILQ_REMOVE(&pData->pDnsList, pDns, de_list);
565 RTMemFree(pDns);
566 }
567
568 while (!LIST_EMPTY(&pData->pDomainList))
569 {
570 pDomain = LIST_FIRST(&pData->pDomainList);
571 LIST_REMOVE(pDomain, dd_list);
572 if (pDomain->dd_pszDomain != NULL)
573 RTStrFree(pDomain->dd_pszDomain);
574 RTMemFree(pDomain);
575 }
576}
577
578int get_dns_addr(PNATState pData)
579{
580 return get_dns_addr_domain(pData, NULL);
581}
582
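/**
 * Allocates and initialises the NAT state: DHCP/BOOTP, IP, ICMP, mbufs, the DNS lists
 * and the libalias instance. Returns VINF_NAT_DNS if the host DNS configuration could
 * not be read, VINF_SUCCESS on full success, or a VERR_* status on fatal failures.
 */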
583int slirp_init(PNATState *ppData, uint32_t u32NetAddr, uint32_t u32Netmask,
584 bool fPassDomain, bool fUseHostResolver, int i32AliasMode,
585 int iIcmpCacheLimit, void *pvUser)
586{
587 int fNATfailed = 0;
588 int rc;
589 PNATState pData;
590 if (u32Netmask & 0x1f)
591 /* CTL is x.x.x.15, bootp passes up to 16 IPs (15..31) */
592 return VERR_INVALID_PARAMETER;
593 pData = RTMemAllocZ(RT_ALIGN_Z(sizeof(NATState), sizeof(uint64_t)));
594 *ppData = pData;
595 if (!pData)
596 return VERR_NO_MEMORY;
597 pData->fPassDomain = !fUseHostResolver ? fPassDomain : false;
598 pData->fUseHostResolver = fUseHostResolver;
599 pData->pvUser = pvUser;
600 pData->netmask = u32Netmask;
601
602 /* sockets & TCP defaults */
603 pData->socket_rcv = 64 * _1K;
604 pData->socket_snd = 64 * _1K;
605 tcp_sndspace = 64 * _1K;
606 tcp_rcvspace = 64 * _1K;
607
608 /*
609 * Use the same default here as in DevNAT.cpp (SoMaxConnection CFGM value)
610 * to avoid release log noise.
611 */
612 pData->soMaxConn = 10;
613
614#ifdef RT_OS_WINDOWS
615 {
616 WSADATA Data;
617 WSAStartup(MAKEWORD(2, 0), &Data);
618 }
619 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
620#endif
621#ifdef VBOX_WITH_SLIRP_MT
622 QSOCKET_LOCK_CREATE(tcb);
623 QSOCKET_LOCK_CREATE(udb);
624 rc = RTReqQueueCreate(&pData->pReqQueue);
625 AssertReleaseRC(rc);
626#endif
627
628 link_up = 1;
629
630 rc = bootp_dhcp_init(pData);
631 if (RT_FAILURE(rc))
632 {
633 Log(("NAT: DHCP server initialization failed\n"));
634 RTMemFree(pData);
635 *ppData = NULL;
636 return rc;
637 }
638 debug_init(pData);
639 if_init(pData);
640 ip_init(pData);
641 icmp_init(pData, iIcmpCacheLimit);
642
643 /* Initialise mbufs *after* setting the MTU */
644 mbuf_init(pData);
645
646 pData->special_addr.s_addr = u32NetAddr;
647 pData->slirp_ethaddr = &special_ethaddr[0];
648 alias_addr.s_addr = pData->special_addr.s_addr | RT_H2N_U32_C(CTL_ALIAS);
649 /* @todo: add the ability to configure this stuff */
650
651 /* set default addresses */
652 inet_aton("127.0.0.1", &loopback_addr);
653 if (!pData->fUseHostResolver)
654 {
655 if (slirp_init_dns_list(pData) < 0)
656 fNATfailed = 1;
657
658 dnsproxy_init(pData);
659 }
660 if (i32AliasMode & ~(PKT_ALIAS_LOG|PKT_ALIAS_SAME_PORTS|PKT_ALIAS_PROXY_ONLY))
661 {
662 Log(("NAT: alias mode %x is ignored\n", i32AliasMode));
663 i32AliasMode = 0;
664 }
665 pData->i32AliasMode = i32AliasMode;
666 getouraddr(pData);
667 {
668 int flags = 0;
669 struct in_addr proxy_addr;
670 pData->proxy_alias = LibAliasInit(pData, NULL);
671 if (pData->proxy_alias == NULL)
672 {
673 Log(("NAT: LibAlias default rule wasn't initialized\n"));
674 AssertMsgFailed(("NAT: LibAlias default rule wasn't initialized\n"));
675 }
676 flags = LibAliasSetMode(pData->proxy_alias, 0, 0);
677#ifndef NO_FW_PUNCH
678 flags |= PKT_ALIAS_PUNCH_FW;
679#endif
680 flags |= pData->i32AliasMode; /* do transparent proxying */
681 flags = LibAliasSetMode(pData->proxy_alias, flags, ~0);
682 proxy_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
683 LibAliasSetAddress(pData->proxy_alias, proxy_addr);
684 ftp_alias_load(pData);
685 nbt_alias_load(pData);
686 if (pData->fUseHostResolver)
687 dns_alias_load(pData);
688 }
689#ifdef VBOX_WITH_NAT_SEND2HOME
690 /* @todo: we should know all interfaces available on host. */
691 pData->pInSockAddrHomeAddress = RTMemAllocZ(sizeof(struct sockaddr));
692 pData->cInHomeAddressSize = 1;
693 inet_aton("192.168.1.25", &pData->pInSockAddrHomeAddress[0].sin_addr);
694 pData->pInSockAddrHomeAddress[0].sin_family = AF_INET;
695#ifdef RT_OS_DARWIN
696 pData->pInSockAddrHomeAddress[0].sin_len = sizeof(struct sockaddr_in);
697#endif
698#endif
699 return fNATfailed ? VINF_NAT_DNS : VINF_SUCCESS;
700}
701
702/**
703 * Register statistics.
704 */
705void slirp_register_statistics(PNATState pData, PPDMDRVINS pDrvIns)
706{
707#ifdef VBOX_WITH_STATISTICS
708# define PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
709# define COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
710# include "counters.h"
711# undef COUNTER
712/** @todo register statistics for the variables dumped by:
713 * ipstats(pData); tcpstats(pData); udpstats(pData); icmpstats(pData);
714 * mbufstats(pData); sockstats(pData); */
715#else /* VBOX_WITH_STATISTICS */
716 NOREF(pData);
717 NOREF(pDrvIns);
718#endif /* !VBOX_WITH_STATISTICS */
719}
720
721/**
722 * Deregister statistics.
723 */
724void slirp_deregister_statistics(PNATState pData, PPDMDRVINS pDrvIns)
725{
726 if (pData == NULL)
727 return;
728#ifdef VBOX_WITH_STATISTICS
729# define PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
730# define COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
731# include "counters.h"
732#else /* VBOX_WITH_STATISTICS */
733 NOREF(pData);
734 NOREF(pDrvIns);
735#endif /* !VBOX_WITH_STATISTICS */
736}
737
738/**
739 * Marks the link as up, making it possible to establish new connections.
740 */
741void slirp_link_up(PNATState pData)
742{
743 struct arp_cache_entry *ac;
744 link_up = 1;
745
746 if (LIST_EMPTY(&pData->arp_cache))
747 return;
748
749 LIST_FOREACH(ac, &pData->arp_cache, list)
750 {
751 activate_port_forwarding(pData, ac->ether);
752 }
753}
754
755/**
756 * Marks the link as down and cleans up the current connections.
757 */
758void slirp_link_down(PNATState pData)
759{
760 struct socket *so;
761 struct port_forward_rule *rule;
762
763 while ((so = tcb.so_next) != &tcb)
764 {
765 if (so->so_state & SS_NOFDREF || so->s == -1)
766 sofree(pData, so);
767 else
768 tcp_drop(pData, sototcpcb(so), 0);
769 }
770
771 while ((so = udb.so_next) != &udb)
772 udp_detach(pData, so);
773
774 /*
775 * Clear the active state of port-forwarding rules to force
776 * re-setup on restoration of communications.
777 */
778 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
779 {
780 rule->activated = 0;
781 }
782 pData->cRedirectionsActive = 0;
783
784 link_up = 0;
785}
786
787/**
788 * Terminates the slirp component.
789 */
790void slirp_term(PNATState pData)
791{
792 if (pData == NULL)
793 return;
794 icmp_finit(pData);
795
796 slirp_link_down(pData);
797 slirp_release_dns_list(pData);
798 ftp_alias_unload(pData);
799 nbt_alias_unload(pData);
800 if (pData->fUseHostResolver)
801 {
802 dns_alias_unload(pData);
803#ifdef VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER
804 while (!LIST_EMPTY(&pData->DNSMapHead))
805 {
806 PDNSMAPPINGENTRY pDnsEntry = LIST_FIRST(&pData->DNSMapHead);
807 LIST_REMOVE(pDnsEntry, MapList);
808 RTStrFree(pDnsEntry->pszCName);
809 RTMemFree(pDnsEntry);
810 }
811#endif
812 }
813 while (!LIST_EMPTY(&instancehead))
814 {
815 struct libalias *la = LIST_FIRST(&instancehead);
816 /* libalias do all clean up */
817 LibAliasUninit(la);
818 }
819 while (!LIST_EMPTY(&pData->arp_cache))
820 {
821 struct arp_cache_entry *ac = LIST_FIRST(&pData->arp_cache);
822 LIST_REMOVE(ac, list);
823 RTMemFree(ac);
824 }
825 bootp_dhcp_fini(pData);
826 m_fini(pData);
827#ifdef RT_OS_WINDOWS
828 WSACleanup();
829#endif
830#ifndef VBOX_WITH_SLIRP_BSD_SBUF
831#ifdef LOG_ENABLED
832 Log(("\n"
833 "NAT statistics\n"
834 "--------------\n"
835 "\n"));
836 ipstats(pData);
837 tcpstats(pData);
838 udpstats(pData);
839 icmpstats(pData);
840 mbufstats(pData);
841 sockstats(pData);
842 Log(("\n"
843 "\n"
844 "\n"));
845#endif
846#endif
847 RTMemFree(pData);
848}
849
850
851#define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
852#define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
853
854/*
855 * curtime kept to an accuracy of 1ms
856 */
857static void updtime(PNATState pData)
858{
859#ifdef RT_OS_WINDOWS
860 struct _timeb tb;
861
862 _ftime(&tb);
863 curtime = (u_int)tb.time * (u_int)1000;
864 curtime += (u_int)tb.millitm;
865#else
866 gettimeofday(&tt, 0);
867
868 curtime = (u_int)tt.tv_sec * (u_int)1000;
869 curtime += (u_int)tt.tv_usec / (u_int)1000;
870
871 if ((tt.tv_usec % 1000) >= 500)
872 curtime++;
873#endif
874}
875
876#ifdef RT_OS_WINDOWS
877void slirp_select_fill(PNATState pData, int *pnfds)
878#else /* RT_OS_WINDOWS */
879void slirp_select_fill(PNATState pData, int *pnfds, struct pollfd *polls)
880#endif /* !RT_OS_WINDOWS */
881{
882 struct socket *so, *so_next;
883 int nfds;
884#if defined(RT_OS_WINDOWS)
885 int rc;
886 int error;
887#else
888 int poll_index = 0;
889#endif
890 int i;
891
892 STAM_PROFILE_START(&pData->StatFill, a);
893
894 nfds = *pnfds;
895
896 /*
897 * First, TCP sockets
898 */
899 do_slowtimo = 0;
900 if (!link_up)
901 goto done;
902
903 /*
904 * *_slowtimo needs calling if there are IP fragments
905 * in the fragment queue, or there are TCP connections active
906 */
907 /* XXX:
908 * triggering of fragment expiration should be the same, but should use the new macros
909 */
910 do_slowtimo = (tcb.so_next != &tcb);
911 if (!do_slowtimo)
912 {
913 for (i = 0; i < IPREASS_NHASH; i++)
914 {
915 if (!TAILQ_EMPTY(&ipq[i]))
916 {
917 do_slowtimo = 1;
918 break;
919 }
920 }
921 }
922 /* always add the ICMP socket */
923#ifndef RT_OS_WINDOWS
924 pData->icmp_socket.so_poll_index = -1;
925#endif
926 ICMP_ENGAGE_EVENT(&pData->icmp_socket, readfds);
927
928 STAM_COUNTER_RESET(&pData->StatTCP);
929 STAM_COUNTER_RESET(&pData->StatTCPHot);
930
931 QSOCKET_FOREACH(so, so_next, tcp)
932 /* { */
933#if !defined(RT_OS_WINDOWS)
934 so->so_poll_index = -1;
935#endif
936 STAM_COUNTER_INC(&pData->StatTCP);
937#ifdef VBOX_WITH_NAT_UDP_SOCKET_CLONE
938 /* TCP socket can't be cloned */
939 Assert((!so->so_cloneOf));
940#endif
941 /*
942 * See if we need a tcp_fasttimo
943 */
944 if ( time_fasttimo == 0
945 && so->so_tcpcb != NULL
946 && so->so_tcpcb->t_flags & TF_DELACK)
947 {
948 time_fasttimo = curtime; /* Flag when we want a fasttimo */
949 }
950
951 /*
952 * NOFDREF can include still connecting to local-host,
953 * newly socreated() sockets etc. Don't want to select these.
954 */
955 if (so->so_state & SS_NOFDREF || so->s == -1)
956 CONTINUE(tcp);
957
958 /*
959 * Set for reading sockets which are accepting
960 */
961 if (so->so_state & SS_FACCEPTCONN)
962 {
963 STAM_COUNTER_INC(&pData->StatTCPHot);
964 TCP_ENGAGE_EVENT1(so, readfds);
965 CONTINUE(tcp);
966 }
967
968 /*
969 * Set for writing sockets which are connecting
970 */
971 if (so->so_state & SS_ISFCONNECTING)
972 {
973 Log2(("connecting %R[natsock] engaged\n",so));
974 STAM_COUNTER_INC(&pData->StatTCPHot);
975#ifdef RT_OS_WINDOWS
976 WIN_TCP_ENGAGE_EVENT2(so, writefds, connectfds);
977#else
978 TCP_ENGAGE_EVENT1(so, writefds);
979#endif
980 }
981
982 /*
983 * Set for writing if we are connected, can send more, and
984 * we have something to send
985 */
986 if (CONN_CANFSEND(so) && SBUF_LEN(&so->so_rcv))
987 {
988 STAM_COUNTER_INC(&pData->StatTCPHot);
989 TCP_ENGAGE_EVENT1(so, writefds);
990 }
991
992 /*
993 * Set for reading (and urgent data) if we are connected, can
994 * receive more, and we have room for it XXX /2 ?
995 */
996 /* @todo: vvl - check which predicate will be more useful here in terms of the new sbufs. */
997 if ( CONN_CANFRCV(so)
998 && (SBUF_LEN(&so->so_snd) < (SBUF_SIZE(&so->so_snd)/2))
999#ifdef RT_OS_WINDOWS
1000 && !(so->so_state & SS_ISFCONNECTING)
1001#endif
1002 )
1003 {
1004 STAM_COUNTER_INC(&pData->StatTCPHot);
1005 TCP_ENGAGE_EVENT2(so, readfds, xfds);
1006 }
1007 LOOP_LABEL(tcp, so, so_next);
1008 }
1009
1010 /*
1011 * UDP sockets
1012 */
1013 STAM_COUNTER_RESET(&pData->StatUDP);
1014 STAM_COUNTER_RESET(&pData->StatUDPHot);
1015
1016 QSOCKET_FOREACH(so, so_next, udp)
1017 /* { */
1018
1019 STAM_COUNTER_INC(&pData->StatUDP);
1020#if !defined(RT_OS_WINDOWS)
1021 so->so_poll_index = -1;
1022#endif
1023
1024 /*
1025 * See if it's timed out
1026 */
1027 if (so->so_expire)
1028 {
1029 if (so->so_expire <= curtime)
1030 {
1031 Log2(("NAT: %R[natsock] expired\n", so));
1032 if (so->so_timeout != NULL)
1033 {
1034 so->so_timeout(pData, so, so->so_timeout_arg);
1035 }
1036#ifdef VBOX_WITH_SLIRP_MT
1037 /* we need so_next to continue our loop */
1038 so_next = so->so_next;
1039#endif
1040 UDP_DETACH(pData, so, so_next);
1041 CONTINUE_NO_UNLOCK(udp);
1042 }
1043 }
1044#ifdef VBOX_WITH_NAT_UDP_SOCKET_CLONE
1045 if (so->so_cloneOf)
1046 CONTINUE_NO_UNLOCK(udp);
1047#endif
1048
1049 /*
1050 * When UDP packets are received from over the link, they're
1051 * sendto()'d straight away, so there is no need to select them for writing.
1052 * Limit the number of packets queued by this session to 4.
1053 * Note that even though we try and limit this to 4 packets,
1054 * the session could have more queued if the packets needed
1055 * to be fragmented.
1056 *
1057 * (XXX <= 4 ?)
1058 */
1059 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4)
1060 {
1061 STAM_COUNTER_INC(&pData->StatUDPHot);
1062 UDP_ENGAGE_EVENT(so, readfds);
1063 }
1064 LOOP_LABEL(udp, so, so_next);
1065 }
1066done:
1067
1068#if defined(RT_OS_WINDOWS)
1069 *pnfds = VBOX_EVENT_COUNT;
1070#else /* RT_OS_WINDOWS */
1071 AssertRelease(poll_index <= *pnfds);
1072 *pnfds = poll_index;
1073#endif /* !RT_OS_WINDOWS */
1074
1075 STAM_PROFILE_STOP(&pData->StatFill, a);
1076}
1077
1078
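/**
 * Completes a pending non-blocking connect on @a so; otherwise (unless @a fConnectOnly)
 * writes pending output with SOWRITE. Returns false while the connection attempt is
 * still in progress.
 */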
1079static bool slirpConnectOrWrite(PNATState pData, struct socket *so, bool fConnectOnly)
1080{
1081 int ret;
1082 LogFlowFunc(("ENTER: so:%R[natsock], fConnectOnly:%RTbool\n", so, fConnectOnly));
1083 /*
1084 * Check for non-blocking, still-connecting sockets
1085 */
1086 if (so->so_state & SS_ISFCONNECTING)
1087 {
1088 Log2(("connecting %R[natsock] caught\n", so));
1089 /* Connected */
1090 so->so_state &= ~SS_ISFCONNECTING;
1091
1092 /*
1093 * This should be probably guarded by PROBE_CONN too. Anyway,
1094 * we disable it on OS/2 because the below send call returns
1095 * EFAULT which causes the opened TCP socket to close right
1096 * after it has been opened and connected.
1097 */
1098#ifndef RT_OS_OS2
1099 ret = send(so->s, (const char *)&ret, 0, 0);
1100 if (ret < 0)
1101 {
1102 /* XXXXX Must fix, zero bytes is a NOP */
1103 if ( errno == EAGAIN
1104 || errno == EWOULDBLOCK
1105 || errno == EINPROGRESS
1106 || errno == ENOTCONN)
1107 {
1108 LogFlowFunc(("LEAVE: false\n"));
1109 return false;
1110 }
1111
1112 /* else failed */
1113 so->so_state = SS_NOFDREF;
1114 }
1115 /* else so->so_state &= ~SS_ISFCONNECTING; */
1116#endif
1117
1118 /*
1119 * Continue tcp_input
1120 */
1121 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1122 /* continue; */
1123 }
1124 else if (!fConnectOnly)
1125 SOWRITE(ret, pData, so);
1126 /*
1127 * XXX If we wrote something (a lot), there could be the need
1128 * for a window update. In the worst case, the remote will send
1129 * a window probe to get things going again.
1130 */
1131 LogFlowFunc(("LEAVE: true\n"));
1132 return true;
1133}
1134
1135#if defined(RT_OS_WINDOWS)
1136void slirp_select_poll(PNATState pData, int fTimeout, int fIcmp)
1137#else /* RT_OS_WINDOWS */
1138void slirp_select_poll(PNATState pData, struct pollfd *polls, int ndfs)
1139#endif /* !RT_OS_WINDOWS */
1140{
1141 struct socket *so, *so_next;
1142 int ret;
1143#if defined(RT_OS_WINDOWS)
1144 WSANETWORKEVENTS NetworkEvents;
1145 int rc;
1146 int error;
1147#endif
1148
1149 STAM_PROFILE_START(&pData->StatPoll, a);
1150
1151 /* Update time */
1152 updtime(pData);
1153
1154 /*
1155 * See if anything has timed out
1156 */
1157 if (link_up)
1158 {
1159 if (time_fasttimo && ((curtime - time_fasttimo) >= 2))
1160 {
1161 STAM_PROFILE_START(&pData->StatFastTimer, b);
1162 tcp_fasttimo(pData);
1163 time_fasttimo = 0;
1164 STAM_PROFILE_STOP(&pData->StatFastTimer, b);
1165 }
1166 if (do_slowtimo && ((curtime - last_slowtimo) >= 499))
1167 {
1168 STAM_PROFILE_START(&pData->StatSlowTimer, c);
1169 ip_slowtimo(pData);
1170 tcp_slowtimo(pData);
1171 last_slowtimo = curtime;
1172 STAM_PROFILE_STOP(&pData->StatSlowTimer, c);
1173 }
1174 }
1175#if defined(RT_OS_WINDOWS)
1176 if (fTimeout)
1177 return; /* only timer update */
1178#endif
1179
1180 /*
1181 * Check sockets
1182 */
1183 if (!link_up)
1184 goto done;
1185#if defined(RT_OS_WINDOWS)
1186 /* XXX: before renaming, please see the fIcmp define
1187 * in slirp_state.h
1188 */
1189 if (fIcmp)
1190 sorecvfrom(pData, &pData->icmp_socket);
1191#else
1192 if ( (pData->icmp_socket.s != -1)
1193 && CHECK_FD_SET(&pData->icmp_socket, ignored, readfds))
1194 sorecvfrom(pData, &pData->icmp_socket);
1195#endif
1196 /*
1197 * Check TCP sockets
1198 */
1199 QSOCKET_FOREACH(so, so_next, tcp)
1200 /* { */
1201
1202#ifdef VBOX_WITH_SLIRP_MT
1203 if ( so->so_state & SS_NOFDREF
1204 && so->so_deleted == 1)
1205 {
1206 struct socket *son, *sop = NULL;
1207 QSOCKET_LOCK(tcb);
1208 if (so->so_next != NULL)
1209 {
1210 if (so->so_next != &tcb)
1211 SOCKET_LOCK(so->so_next);
1212 son = so->so_next;
1213 }
1214 if ( so->so_prev != &tcb
1215 && so->so_prev != NULL)
1216 {
1217 SOCKET_LOCK(so->so_prev);
1218 sop = so->so_prev;
1219 }
1220 QSOCKET_UNLOCK(tcb);
1221 remque(pData, so);
1222 NSOCK_DEC();
1223 SOCKET_UNLOCK(so);
1224 SOCKET_LOCK_DESTROY(so);
1225 RTMemFree(so);
1226 so_next = son;
1227 if (sop != NULL)
1228 SOCKET_UNLOCK(sop);
1229 CONTINUE_NO_UNLOCK(tcp);
1230 }
1231#endif
1232 /* TCP socket can't be cloned */
1233#ifdef VBOX_WITH_NAT_UDP_SOCKET_CLONE
1234 Assert((!so->so_cloneOf));
1235#endif
1236 /*
1237 * FD_ISSET is meaningless on these sockets
1238 * (and they can crash the program)
1239 */
1240 if (so->so_state & SS_NOFDREF || so->s == -1)
1241 CONTINUE(tcp);
1242
1243 POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);
1244
1245 LOG_NAT_SOCK(so, TCP, &NetworkEvents, readfds, writefds, xfds);
1246
1247
1248 /*
1249 * Check for URG data
1250 * This will soread as well, so no need to
1251 * test for readfds below if this succeeds
1252 */
1253
1254 /* out-of-band data */
1255 if ( CHECK_FD_SET(so, NetworkEvents, xfds)
1256#ifdef RT_OS_DARWIN
1257 /* Darwin (and probably other BSD hosts) generates a POLLPRI|POLLHUP event on receiving TCP.flags.{ACK|URG|FIN};
1258 * on other Unix hosts this combination doesn't enter this branch
1259 */
1260 && !CHECK_FD_SET(so, NetworkEvents, closefds)
1261#endif
1262#ifdef RT_OS_WINDOWS
1263 /**
1264 * In some cases FD_CLOSE comes together with FD_OOB, which confuses TCP processing.
1265 */
1266 && !WIN_CHECK_FD_SET(so, NetworkEvents, closefds)
1267#endif
1268 )
1269 {
1270 sorecvoob(pData, so);
1271 }
1272
1273 /*
1274 * Check sockets for reading
1275 */
1276 else if ( CHECK_FD_SET(so, NetworkEvents, readfds)
1277 || WIN_CHECK_FD_SET(so, NetworkEvents, acceptds))
1278 {
1279
1280#ifdef RT_OS_WINDOWS
1281 if (WIN_CHECK_FD_SET(so, NetworkEvents, connectfds))
1282 {
1283 /* Finish connection first */
1284 /* should we ignore return value? */
1285 bool fRet = slirpConnectOrWrite(pData, so, true);
1286 LogFunc(("fRet:%RTbool\n", fRet));
1287 }
1288#endif
1289 /*
1290 * Check for incoming connections
1291 */
1292 if (so->so_state & SS_FACCEPTCONN)
1293 {
1294 TCP_CONNECT(pData, so);
1295 if (!CHECK_FD_SET(so, NetworkEvents, closefds))
1296 CONTINUE(tcp);
1297 }
1298
1299 ret = soread(pData, so);
1300 /* Output it if we read something */
1301 if (RT_LIKELY(ret > 0))
1302 TCP_OUTPUT(pData, sototcpcb(so));
1303 }
1304
1305 /*
1306 * Check for FD_CLOSE events.
1307 * In some cases, once FD_CLOSE has been engaged on a socket, the event could be flushed later (for some reason).
1308 */
1309 if ( CHECK_FD_SET(so, NetworkEvents, closefds)
1310 || (so->so_close == 1))
1311 {
1312 /*
1313 * drain the socket
1314 */
1315 for (;;)
1316 {
1317 ret = soread(pData, so);
1318 if (ret > 0)
1319 TCP_OUTPUT(pData, sototcpcb(so));
1320 else
1321 {
1322 Log2(("%R[natsock] errno %d (%s)\n", so, errno, strerror(errno)));
1323 break;
1324 }
1325 }
1326 /* mark the socket for termination _after_ it was drained */
1327 so->so_close = 1;
1328 /* No idea about Windows but on Posix, POLLHUP means that we can't send more.
1329 * Actually in the specific error scenario, POLLERR is set as well. */
1330#ifndef RT_OS_WINDOWS
1331 if (CHECK_FD_SET(so, NetworkEvents, rderr))
1332 sofcantsendmore(so);
1333#endif
1334 CONTINUE(tcp);
1335 }
1336
1337 /*
1338 * Check sockets for writing
1339 */
1340 if ( CHECK_FD_SET(so, NetworkEvents, writefds)
1341#ifdef RT_OS_WINDOWS
1342 || WIN_CHECK_FD_SET(so, NetworkEvents, connectfds)
1343#endif
1344 )
1345 {
1346 if(!slirpConnectOrWrite(pData, so, false))
1347 CONTINUE(tcp);
1348 }
1349
1350 /*
1351 * Probe a still-connecting, non-blocking socket
1352 * to check if it's still alive
1353 */
1354#ifdef PROBE_CONN
1355 if (so->so_state & SS_ISFCONNECTING)
1356 {
1357 ret = recv(so->s, (char *)&ret, 0, 0);
1358
1359 if (ret < 0)
1360 {
1361 /* XXX */
1362 if ( errno == EAGAIN
1363 || errno == EWOULDBLOCK
1364 || errno == EINPROGRESS
1365 || errno == ENOTCONN)
1366 {
1367 CONTINUE(tcp); /* Still connecting, continue */
1368 }
1369
1370 /* else failed */
1371 so->so_state = SS_NOFDREF;
1372
1373 /* tcp_input will take care of it */
1374 }
1375 else
1376 {
1377 ret = send(so->s, &ret, 0, 0);
1378 if (ret < 0)
1379 {
1380 /* XXX */
1381 if ( errno == EAGAIN
1382 || errno == EWOULDBLOCK
1383 || errno == EINPROGRESS
1384 || errno == ENOTCONN)
1385 {
1386 CONTINUE(tcp);
1387 }
1388 /* else failed */
1389 so->so_state = SS_NOFDREF;
1390 }
1391 else
1392 so->so_state &= ~SS_ISFCONNECTING;
1393
1394 }
1395 TCP_INPUT((struct mbuf *)NULL, sizeof(struct ip),so);
1396 } /* SS_ISFCONNECTING */
1397#endif
1398 LOOP_LABEL(tcp, so, so_next);
1399 }
1400
1401 /*
1402 * Now UDP sockets.
1403 * Incoming packets are sent straight away, they're not buffered.
1404 * Incoming UDP data isn't buffered either.
1405 */
1406 QSOCKET_FOREACH(so, so_next, udp)
1407 /* { */
1408#ifdef VBOX_WITH_SLIRP_MT
1409 if ( so->so_state & SS_NOFDREF
1410 && so->so_deleted == 1)
1411 {
1412 struct socket *son, *sop = NULL;
1413 QSOCKET_LOCK(udb);
1414 if (so->so_next != NULL)
1415 {
1416 if (so->so_next != &udb)
1417 SOCKET_LOCK(so->so_next);
1418 son = so->so_next;
1419 }
1420 if ( so->so_prev != &udb
1421 && so->so_prev != NULL)
1422 {
1423 SOCKET_LOCK(so->so_prev);
1424 sop = so->so_prev;
1425 }
1426 QSOCKET_UNLOCK(udb);
1427 remque(pData, so);
1428 NSOCK_DEC();
1429 SOCKET_UNLOCK(so);
1430 SOCKET_LOCK_DESTROY(so);
1431 RTMemFree(so);
1432 so_next = son;
1433 if (sop != NULL)
1434 SOCKET_UNLOCK(sop);
1435 CONTINUE_NO_UNLOCK(udp);
1436 }
1437#endif
1438#ifdef VBOX_WITH_NAT_UDP_SOCKET_CLONE
1439 if (so->so_cloneOf)
1440 CONTINUE_NO_UNLOCK(udp);
1441#endif
1442 POLL_UDP_EVENTS(rc, error, so, &NetworkEvents);
1443
1444 LOG_NAT_SOCK(so, UDP, &NetworkEvents, readfds, writefds, xfds);
1445
1446 if (so->s != -1 && CHECK_FD_SET(so, NetworkEvents, readfds))
1447 {
1448 SORECVFROM(pData, so);
1449 }
1450 LOOP_LABEL(udp, so, so_next);
1451 }
1452
1453done:
1454
1455 STAM_PROFILE_STOP(&pData->StatPoll, a);
1456}
1457
1458
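/** ARP packet header as used on Ethernet with IPv4 addresses (see RFC 826). */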
1459struct arphdr
1460{
1461 unsigned short ar_hrd; /* format of hardware address */
1462 unsigned short ar_pro; /* format of protocol address */
1463 unsigned char ar_hln; /* length of hardware address */
1464 unsigned char ar_pln; /* length of protocol address */
1465 unsigned short ar_op; /* ARP opcode (command) */
1466
1467 /*
1468 * Ethernet looks like this: this part is variable-sized, however...
1469 */
1470 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
1471 unsigned char ar_sip[4]; /* sender IP address */
1472 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
1473 unsigned char ar_tip[4]; /* target IP address */
1474};
1475AssertCompileSize(struct arphdr, 28);
1476
1477/**
1478 * @note This function will free m!
1479 */
1480static void arp_input(PNATState pData, struct mbuf *m)
1481{
1482 struct ethhdr *eh;
1483 struct ethhdr *reh;
1484 struct arphdr *ah;
1485 struct arphdr *rah;
1486 int ar_op;
1487 uint32_t htip;
1488 uint32_t tip;
1489 struct mbuf *mr;
1490 eh = mtod(m, struct ethhdr *);
1491 ah = (struct arphdr *)&eh[1];
1492 htip = RT_N2H_U32(*(uint32_t*)ah->ar_tip);
1493 tip = *(uint32_t*)ah->ar_tip;
1494
1495 ar_op = RT_N2H_U16(ah->ar_op);
1496
1497 switch (ar_op)
1498 {
1499 case ARPOP_REQUEST:
1500 mr = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1501 if (!mr)
1502 break;
1503 reh = mtod(mr, struct ethhdr *);
1504 mr->m_data += ETH_HLEN;
1505 rah = mtod(mr, struct arphdr *);
1506 mr->m_len = sizeof(struct arphdr);
1507 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1508 if ( CTL_CHECK(tip, CTL_DNS)
1509 || CTL_CHECK(tip, CTL_ALIAS)
1510 || CTL_CHECK(tip, CTL_TFTP))
1511 {
1512 rah->ar_hrd = RT_H2N_U16_C(1);
1513 rah->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1514 rah->ar_hln = ETH_ALEN;
1515 rah->ar_pln = 4;
1516 rah->ar_op = RT_H2N_U16_C(ARPOP_REPLY);
1517 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN);
1518
1519 if (!slirpMbufTagService(pData, mr, (uint8_t)(htip & ~pData->netmask)))
1520 {
1521 static bool fTagErrorReported;
1522 if (!fTagErrorReported)
1523 {
1524 LogRel(("NAT: couldn't add the tag(PACKET_SERVICE:%d) to mbuf:%p\n",
1525 (uint8_t)(htip & ~pData->netmask), m));
1526 fTagErrorReported = true;
1527 }
1528 }
1529 rah->ar_sha[5] = (uint8_t)(htip & ~pData->netmask);
1530
1531 memcpy(rah->ar_sip, ah->ar_tip, 4);
1532 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
1533 memcpy(rah->ar_tip, ah->ar_sip, 4);
1534 if_encap(pData, ETH_P_ARP, mr, ETH_ENCAP_URG);
1535 }
1536 else
1537 m_freem(pData, mr);
1538
1539 /* Gratuitous ARP */
1540 if ( *(uint32_t *)ah->ar_sip == *(uint32_t *)ah->ar_tip
1541 && memcmp(ah->ar_tha, broadcast_ethaddr, ETH_ALEN) == 0
1542 && memcmp(eh->h_dest, broadcast_ethaddr, ETH_ALEN) == 0)
1543 {
1544 /* We've received an announcement about an address assignment,
1545 * let's do an ARP cache update
1546 */
1547 static bool fGratuitousArpReported;
1548 if (!fGratuitousArpReported)
1549 {
1550 LogRel(("NAT: Gratuitous ARP [IP:%RTnaipv4, ether:%RTmac]\n",
1551 ah->ar_sip, ah->ar_sha));
1552 fGratuitousArpReported = true;
1553 }
1554 slirp_arp_cache_update_or_add(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]);
1555 }
1556 break;
1557
1558 case ARPOP_REPLY:
1559 slirp_arp_cache_update_or_add(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]);
1560 break;
1561
1562 default:
1563 break;
1564 }
1565
1566 m_freem(pData, m);
1567}
1568
1569/**
1570 * Feed a packet into the slirp engine.
1571 *
1572 * @param m Data buffer, m_len is not valid.
1573 * @param cbBuf The length of the data in m.
1574 */
1575void slirp_input(PNATState pData, struct mbuf *m, size_t cbBuf)
1576{
1577 int proto;
1578 static bool fWarnedIpv6;
1579 struct ethhdr *eh;
1580 uint8_t au8Ether[ETH_ALEN];
1581
1582 m->m_len = cbBuf;
1583 if (cbBuf < ETH_HLEN)
1584 {
1585 Log(("NAT: packet of size %d has been ignored\n", m->m_len));
1586 m_freem(pData, m);
1587 return;
1588 }
1589 eh = mtod(m, struct ethhdr *);
1590 proto = RT_N2H_U16(eh->h_proto);
1591
1592 memcpy(au8Ether, eh->h_source, ETH_ALEN);
1593
1594 switch(proto)
1595 {
1596 case ETH_P_ARP:
1597 arp_input(pData, m);
1598 break;
1599
1600 case ETH_P_IP:
1601 /* Update time. Important if the network is very quiet, as otherwise
1602 * the first outgoing connection gets an incorrect timestamp. */
1603 updtime(pData);
1604 m_adj(m, ETH_HLEN);
1605 M_ASSERTPKTHDR(m);
1606 m->m_pkthdr.header = mtod(m, void *);
1607 ip_input(pData, m);
1608 break;
1609
1610 case ETH_P_IPV6:
1611 m_freem(pData, m);
1612 if (!fWarnedIpv6)
1613 {
1614 LogRel(("NAT: IPv6 not supported\n"));
1615 fWarnedIpv6 = true;
1616 }
1617 break;
1618
1619 default:
1620 Log(("NAT: Unsupported protocol %x\n", proto));
1621 m_freem(pData, m);
1622 break;
1623 }
1624
1625 if (pData->cRedirectionsActive != pData->cRedirectionsStored)
1626 activate_port_forwarding(pData, au8Ether);
1627}
1628
1629/**
1630 * Output the IP packet to the ethernet device.
1631 *
1632 * @note This function will free m!
1633 */
1634void if_encap(PNATState pData, uint16_t eth_proto, struct mbuf *m, int flags)
1635{
1636 struct ethhdr *eh;
1637 uint8_t *mbuf = NULL;
1638 size_t mlen = 0;
1639 STAM_PROFILE_START(&pData->StatIF_encap, a);
1640 LogFlowFunc(("ENTER: pData:%p, eth_proto:%RX16, m:%p, flags:%d\n",
1641 pData, eth_proto, m, flags));
1642
1643 M_ASSERTPKTHDR(m);
1644 m->m_data -= ETH_HLEN;
1645 m->m_len += ETH_HLEN;
1646 eh = mtod(m, struct ethhdr *);
1647 mlen = m->m_len;
1648
1649 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) != 0)
1650 {
1651 struct m_tag *t = m_tag_first(m);
1652 uint8_t u8ServiceId = CTL_ALIAS;
1653 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
1654 memcpy(eh->h_source, special_ethaddr, ETH_ALEN);
1655 Assert(memcmp(eh->h_dest, special_ethaddr, ETH_ALEN) != 0);
1656 if (memcmp(eh->h_dest, zerro_ethaddr, ETH_ALEN) == 0)
1657 {
1658 /* don't do anything */
1659 m_freem(pData, m);
1660 goto done;
1661 }
1662 if ( t
1663 && (t = m_tag_find(m, PACKET_SERVICE, NULL)))
1664 {
1665 Assert(t);
1666 u8ServiceId = *(uint8_t *)&t[1];
1667 }
1668 eh->h_source[5] = u8ServiceId;
1669 }
1670 /*
1671 * we're not expecting to process an mbuf chain here.
1672 */
1673 Assert((!m->m_next));
1674 if (m->m_next)
1675 {
1676 Log(("NAT: if_encap received an mbuf chain, dropping...\n"));
1677 m_freem(pData, m);
1678 goto done;
1679 }
1680 mbuf = mtod(m, uint8_t *);
1681 eh->h_proto = RT_H2N_U16(eth_proto);
1682 LogFunc(("eh(dst:%RTmac, src:%RTmac)\n", eh->h_dest, eh->h_source));
1683 if (flags & ETH_ENCAP_URG)
1684 slirp_urg_output(pData->pvUser, m, mbuf, mlen);
1685 else
1686 slirp_output(pData->pvUser, m, mbuf, mlen);
1687done:
1688 STAM_PROFILE_STOP(&pData->StatIF_encap, a);
1689 LogFlowFuncLeave();
1690}
1691
1692/**
1693 * We are still using the DHCP server leases to map an Ethernet address to an IP address.
1694 * @todo see rt_lookup_in_cache
1695 */
1696static uint32_t find_guest_ip(PNATState pData, const uint8_t *eth_addr)
1697{
1698 uint32_t ip = INADDR_ANY;
1699 int rc;
1700
1701 if (eth_addr == NULL)
1702 return INADDR_ANY;
1703
1704 if ( memcmp(eth_addr, zerro_ethaddr, ETH_ALEN) == 0
1705 || memcmp(eth_addr, broadcast_ethaddr, ETH_ALEN) == 0)
1706 return INADDR_ANY;
1707
1708 rc = slirp_arp_lookup_ip_by_ether(pData, eth_addr, &ip);
1709 if (RT_SUCCESS(rc))
1710 return ip;
1711
1712 bootp_cache_lookup_ip_by_ether(pData, eth_addr, &ip);
1713 /* ignore return code, ip will be set to INADDR_ANY on error */
1714 return ip;
1715}
1716
1717/**
1718 * We need to check whether we've activated port forwarding
1719 * for a specific machine ... that of course relates to
1720 * service mode
1721 * @todo finish this for service case
1722 */
1723static void activate_port_forwarding(PNATState pData, const uint8_t *h_source)
1724{
1725 struct port_forward_rule *rule, *tmp;
1726 const uint8_t *pu8EthSource = h_source;
1727
1728 /* check mac here */
1729 LIST_FOREACH_SAFE(rule, &pData->port_forward_rule_head, list, tmp)
1730 {
1731 struct socket *so;
1732 struct alias_link *alias_link;
1733 struct libalias *lib;
1734 int flags;
1735 struct sockaddr sa;
1736 struct sockaddr_in *psin;
1737 socklen_t socketlen;
1738 struct in_addr alias;
1739 int rc;
1740 uint32_t guest_addr; /* need to understand if we already give address to guest */
1741
1742 if (rule->activated)
1743 continue;
1744
1745#ifdef VBOX_WITH_NAT_SERVICE
1746 /**
1747 * The case where the guest IP is INADDR_ANY shouldn't occur in the NAT service.
1748 */
1749 Assert((rule->guest_addr.s_addr != INADDR_ANY));
1750 guest_addr = rule->guest_addr.s_addr;
1751#else /* VBOX_WITH_NAT_SERVICE */
1752 guest_addr = find_guest_ip(pData, pu8EthSource);
1753#endif /* !VBOX_WITH_NAT_SERVICE */
1754 if (guest_addr == INADDR_ANY)
1755 {
1756 /* the address wasn't granted */
1757 return;
1758 }
1759
1760#if !defined(VBOX_WITH_NAT_SERVICE)
1761 if ( rule->guest_addr.s_addr != guest_addr
1762 && rule->guest_addr.s_addr != INADDR_ANY)
1763 continue;
1764 if (rule->guest_addr.s_addr == INADDR_ANY)
1765 rule->guest_addr.s_addr = guest_addr;
1766#endif
1767
1768 LogRel(("NAT: set redirect %s host port %d => guest port %d @ %RTnaipv4\n",
1769 rule->proto == IPPROTO_UDP ? "UDP" : "TCP", rule->host_port, rule->guest_port, guest_addr));
1770
1771 if (rule->proto == IPPROTO_UDP)
1772 so = udp_listen(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1773 RT_H2N_U16(rule->guest_port), 0);
1774 else
1775 so = solisten(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1776 RT_H2N_U16(rule->guest_port), 0);
1777
1778 if (so == NULL)
1779 goto remove_port_forwarding;
1780
1781 psin = (struct sockaddr_in *)&sa;
1782 psin->sin_family = AF_INET;
1783 psin->sin_port = 0;
1784 psin->sin_addr.s_addr = INADDR_ANY;
1785 socketlen = sizeof(struct sockaddr);
1786
1787 rc = getsockname(so->s, &sa, &socketlen);
1788 if (rc < 0 || sa.sa_family != AF_INET)
1789 goto remove_port_forwarding;
1790
1791 psin = (struct sockaddr_in *)&sa;
1792
1793 lib = LibAliasInit(pData, NULL);
1794 flags = LibAliasSetMode(lib, 0, 0);
1795 flags |= pData->i32AliasMode;
1796 flags |= PKT_ALIAS_REVERSE; /* set reverse */
1797 flags = LibAliasSetMode(lib, flags, ~0);
1798
1799 alias.s_addr = RT_H2N_U32(RT_N2H_U32(guest_addr) | CTL_ALIAS);
1800 alias_link = LibAliasRedirectPort(lib, psin->sin_addr, RT_H2N_U16(rule->host_port),
1801 alias, RT_H2N_U16(rule->guest_port),
1802 pData->special_addr, -1, /* not very clear for now */
1803 rule->proto);
1804 if (!alias_link)
1805 goto remove_port_forwarding;
1806
1807 so->so_la = lib;
1808 rule->activated = 1;
1809 rule->so = so;
1810 pData->cRedirectionsActive++;
1811 continue;
1812
1813 remove_port_forwarding:
1814 LogRel(("NAT: failed to redirect %s %d => %d\n",
1815 (rule->proto == IPPROTO_UDP?"UDP":"TCP"), rule->host_port, rule->guest_port));
1816 LIST_REMOVE(rule, list);
1817 pData->cRedirectionsStored--;
1818 RTMemFree(rule);
1819 }
1820}
1821
1822/**
1823 * Since 3.1, instead of opening a new socket right away, we
1824 * gather more information:
1825 * 1. bind IP
1826 * 2. host port
1827 * 3. guest port
1828 * 4. proto
1829 * 5. guest MAC address
1830 * The guest's MAC address is rather important for the service case, but we could easily
1831 * get it from the VM configuration in DrvNAT or the service; the idea is to activate
1832 * the corresponding port-forwarding rule.
1833 */
1834int slirp_add_redirect(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1835 struct in_addr guest_addr, int guest_port, const uint8_t *ethaddr)
1836{
1837 struct port_forward_rule *rule = NULL;
1838 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
1839 {
1840 if ( rule->proto == (is_udp ? IPPROTO_UDP : IPPROTO_TCP)
1841 && rule->host_port == host_port
1842 && rule->bind_ip.s_addr == host_addr.s_addr
1843 && rule->guest_port == guest_port
1844 && rule->guest_addr.s_addr == guest_addr.s_addr
1845 )
1846 return 0; /* rule has been already registered */
1847 }
1848
1849 rule = RTMemAllocZ(sizeof(struct port_forward_rule));
1850 if (rule == NULL)
1851 return 1;
1852
1853 rule->proto = (is_udp ? IPPROTO_UDP : IPPROTO_TCP);
1854 rule->host_port = host_port;
1855 rule->guest_port = guest_port;
1856 rule->guest_addr.s_addr = guest_addr.s_addr;
1857 rule->bind_ip.s_addr = host_addr.s_addr;
1858 if (ethaddr != NULL)
1859 memcpy(rule->mac_address, ethaddr, ETH_ALEN);
1860 /* @todo add mac address */
1861 LIST_INSERT_HEAD(&pData->port_forward_rule_head, rule, list);
1862 pData->cRedirectionsStored++;
1863 /* activate port-forwarding if guest has already got assigned IP */
1864 if ( ethaddr
1865 && memcmp(ethaddr, zerro_ethaddr, ETH_ALEN))
1866 activate_port_forwarding(pData, ethaddr);
1867 return 0;
1868}
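/*
 * Illustrative usage only (hypothetical caller, not part of this file): a frontend
 * such as DrvNAT could register a TCP port-forwarding rule for the function above
 * roughly like this, assuming 10.0.2.15 is the guest address (the usual NAT default):
 *
 *     struct in_addr hostAddr, guestAddr;
 *     hostAddr.s_addr = INADDR_ANY;          // listen on all host interfaces
 *     inet_aton("10.0.2.15", &guestAddr);
 *     // is_udp = 0 selects TCP; ports are passed in host byte order
 *     slirp_add_redirect(pData, 0, hostAddr, 2222, guestAddr, 22, NULL);
 *
 * The rule only becomes active once the guest's MAC/IP mapping is known; see
 * activate_port_forwarding() above.
 */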
1869
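/**
 * Removes a previously activated port-forwarding rule matching the given protocol,
 * addresses and ports: the libalias instance is uninitialised and the listening
 * socket is closed before the rule is freed.
 */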
1870int slirp_remove_redirect(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1871 struct in_addr guest_addr, int guest_port)
1872{
1873 struct port_forward_rule *rule = NULL;
1874 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
1875 {
1876 if ( rule->proto == (is_udp ? IPPROTO_UDP : IPPROTO_TCP)
1877 && rule->host_port == host_port
1878 && rule->guest_port == guest_port
1879 && rule->bind_ip.s_addr == host_addr.s_addr
1880 && rule->guest_addr.s_addr == guest_addr.s_addr
1881 && rule->activated)
1882 {
1883 LogRel(("NAT: remove redirect %s host port %d => guest port %d @ %RTnaipv4\n",
1884 rule->proto == IPPROTO_UDP ? "UDP" : "TCP", rule->host_port, rule->guest_port, guest_addr));
1885
1886 LibAliasUninit(rule->so->so_la);
1887 if (is_udp)
1888 udp_detach(pData, rule->so);
1889 else
1890 tcp_close(pData, sototcpcb(rule->so));
1891 LIST_REMOVE(rule, list);
1892 RTMemFree(rule);
1893 pData->cRedirectionsStored--;
1894 break;
1895 }
1896
1897 }
1898 return 0;
1899}
1900
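/**
 * Remembers the guest MAC address and, if the guest already has an IP address,
 * updates the ARP cache and activates any pending port-forwarding rules for it.
 */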
1901void slirp_set_ethaddr_and_activate_port_forwarding(PNATState pData, const uint8_t *ethaddr, uint32_t GuestIP)
1902{
1903#ifndef VBOX_WITH_NAT_SERVICE
1904 memcpy(client_ethaddr, ethaddr, ETH_ALEN);
1905#endif
1906 if (GuestIP != INADDR_ANY)
1907 {
1908 slirp_arp_cache_update_or_add(pData, GuestIP, ethaddr);
1909 activate_port_forwarding(pData, ethaddr);
1910 }
1911}
1912
1913#if defined(RT_OS_WINDOWS)
1914HANDLE *slirp_get_events(PNATState pData)
1915{
1916 return pData->phEvents;
1917}
1918void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index)
1919{
1920 pData->phEvents[index] = hEvent;
1921}
1922#endif
1923
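/**
 * Returns the poll timeout in milliseconds: 2 ms while a TCP fast timer is pending,
 * 500 ms while slow timers are needed, otherwise one hour.
 */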
1924unsigned int slirp_get_timeout_ms(PNATState pData)
1925{
1926 if (link_up)
1927 {
1928 if (time_fasttimo)
1929 return 2;
1930 if (do_slowtimo)
1931 return 500; /* see PR_SLOWHZ */
1932 }
1933 return 3600*1000; /* one hour */
1934}
1935
1936#ifndef RT_OS_WINDOWS
1937int slirp_get_nsock(PNATState pData)
1938{
1939 return pData->nsock;
1940}
1941#endif
1942
1943/*
1944 * this function is called from the NAT thread
1945 */
1946void slirp_post_sent(PNATState pData, void *pvArg)
1947{
1948 struct mbuf *m = (struct mbuf *)pvArg;
1949 m_freem(pData, m);
1950}
1951#ifdef VBOX_WITH_SLIRP_MT
1952void slirp_process_queue(PNATState pData)
1953{
1954 RTReqQueueProcess(pData->pReqQueue, RT_INDEFINITE_WAIT);
1955}
1956void *slirp_get_queue(PNATState pData)
1957{
1958 return pData->pReqQueue;
1959}
1960#endif
1961
1962void slirp_set_dhcp_TFTP_prefix(PNATState pData, const char *tftpPrefix)
1963{
1964 Log2(("tftp_prefix: %s\n", tftpPrefix));
1965 tftp_prefix = tftpPrefix;
1966}
1967
1968void slirp_set_dhcp_TFTP_bootfile(PNATState pData, const char *bootFile)
1969{
1970 Log2(("bootFile: %s\n", bootFile));
1971 bootp_filename = bootFile;
1972}
1973
1974void slirp_set_dhcp_next_server(PNATState pData, const char *next_server)
1975{
1976 Log2(("next_server: %s\n", next_server));
1977 if (next_server == NULL)
1978 pData->tftp_server.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_TFTP);
1979 else
1980 inet_aton(next_server, &pData->tftp_server);
1981}
1982
1983int slirp_set_binding_address(PNATState pData, char *addr)
1984{
1985 if (addr == NULL || (inet_aton(addr, &pData->bindIP) == 0))
1986 {
1987 pData->bindIP.s_addr = INADDR_ANY;
1988 return 1;
1989 }
1990 return 0;
1991}
1992
1993void slirp_set_dhcp_dns_proxy(PNATState pData, bool fDNSProxy)
1994{
1995 if (!pData->fUseHostResolver)
1996 {
1997 Log2(("NAT: DNS proxy switched %s\n", (fDNSProxy ? "on" : "off")));
1998 pData->fUseDnsProxy = fDNSProxy;
1999 }
2000 else if (fDNSProxy)
2001 LogRel(("NAT: Host Resolver conflicts with DNS proxy, the latter was forcibly ignored\n"));
2002}
2003
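/*
 * Parameter sanity check used by the tuning setters below: values outside
 * [lim_min, lim_max] are logged and make the calling function return early,
 * accepted values are just logged.
 */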
2004#define CHECK_ARG(name, val, lim_min, lim_max) \
2005 do { \
2006 if ((val) < (lim_min) || (val) > (lim_max)) \
2007 { \
2008 LogRel(("NAT: (" #name ":%d) has been ignored, " \
2009 "because it is out of range (%d, %d)\n", (val), (lim_min), (lim_max))); \
2010 return; \
2011 } \
2012 else \
2013 LogRel(("NAT: (" #name ":%d)\n", (val))); \
2014 } while (0)
2015
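/**
 * Sets the listen backlog used for NAT sockets: values above SOMAXCONN are
 * clamped to SOMAXCONN, values below 1 are rejected and the current setting
 * is kept.
 */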
2016void slirp_set_somaxconn(PNATState pData, int iSoMaxConn)
2017{
2018 LogFlowFunc(("iSoMaxConn:%d\n", iSoMaxConn));
2019 /* Conditions */
2020 if (iSoMaxConn > SOMAXCONN)
2021 {
2022 LogRel(("NAT: value of somaxconn(%d) is bigger than SOMAXCONN(%d)\n", iSoMaxConn, SOMAXCONN));
2023 iSoMaxConn = SOMAXCONN;
2024 }
2025
2026 if (iSoMaxConn < 1)
2027 {
2028 LogRel(("NAT: proposed value (%d) of somaxconn is invalid, keeping the current value (%d)\n", iSoMaxConn, pData->soMaxConn));
2029 LogFlowFuncLeave();
2030 return;
2031 }
2032
2033 /* Assignment */
2034 if (pData->soMaxConn != iSoMaxConn)
2035 {
2036 LogRel(("NAT: value of somaxconn has been changed from %d to %d\n",
2037 pData->soMaxConn, iSoMaxConn));
2038 pData->soMaxConn = iSoMaxConn;
2039 }
2040 LogFlowFuncLeave();
2041}
2042/* don't allow the user to set values below 8KB or above 1MB */
2043#define _8K_1M_CHECK_ARG(name, val) CHECK_ARG(name, (val), 8, 1024)
2044void slirp_set_rcvbuf(PNATState pData, int kilobytes)
2045{
2046 _8K_1M_CHECK_ARG("SOCKET_RCVBUF", kilobytes);
2047 pData->socket_rcv = kilobytes;
2048}
2049void slirp_set_sndbuf(PNATState pData, int kilobytes)
2050{
2051 _8K_1M_CHECK_ARG("SOCKET_SNDBUF", kilobytes);
2052 pData->socket_snd = kilobytes * _1K;
2053}
2054void slirp_set_tcp_rcvspace(PNATState pData, int kilobytes)
2055{
2056 _8K_1M_CHECK_ARG("TCP_RCVSPACE", kilobytes);
2057 tcp_rcvspace = kilobytes * _1K;
2058}
2059void slirp_set_tcp_sndspace(PNATState pData, int kilobytes)
2060{
2061 _8K_1M_CHECK_ARG("TCP_SNDSPACE", kilobytes);
2062 tcp_sndspace = kilobytes * _1K;
2063}
2064
2065/*
2066 * Looks up the Ethernet address for the given IP in the ARP cache.
2067 * Note: it is the caller's responsibility to allocate the buffer for the result.
2068 * @returns IPRT status code.
2069 */
2070int slirp_arp_lookup_ether_by_ip(PNATState pData, uint32_t ip, uint8_t *ether)
2071{
2072 struct arp_cache_entry *ac;
2073
2074 if (ether == NULL)
2075 return VERR_INVALID_PARAMETER;
2076
2077 if (LIST_EMPTY(&pData->arp_cache))
2078 return VERR_NOT_FOUND;
2079
2080 LIST_FOREACH(ac, &pData->arp_cache, list)
2081 {
2082 if ( ac->ip == ip
2083 && memcmp(ac->ether, broadcast_ethaddr, ETH_ALEN) != 0)
2084 {
2085 memcpy(ether, ac->ether, ETH_ALEN);
2086 return VINF_SUCCESS;
2087 }
2088 }
2089 return VERR_NOT_FOUND;
2090}
2091
2092/*
2093 * Looks up the IP address for the given Ethernet address in the ARP cache.
2094 * Note: it is the caller's responsibility to allocate the buffer for the result.
2095 * @returns VINF_SUCCESS if found, VERR_NOT_FOUND otherwise.
2096 */
2097int slirp_arp_lookup_ip_by_ether(PNATState pData, const uint8_t *ether, uint32_t *ip)
2098{
2099 struct arp_cache_entry *ac;
2100 *ip = INADDR_ANY;
2101
2102 if (LIST_EMPTY(&pData->arp_cache))
2103 return VERR_NOT_FOUND;
2104
2105 LIST_FOREACH(ac, &pData->arp_cache, list)
2106 {
2107 if (memcmp(ether, ac->ether, ETH_ALEN) == 0)
2108 {
2109 *ip = ac->ip;
2110 return VINF_SUCCESS;
2111 }
2112 }
2113 return VERR_NOT_FOUND;
2114}
2115
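/**
 * Builds and queues a broadcast ARP request asking who has @a dst.
 * The request is sent on behalf of the gateway alias (special_ethaddr with the
 * CTL_ALIAS suffix) and handed to if_encap with the urgent flag.
 */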
2116void slirp_arp_who_has(PNATState pData, uint32_t dst)
2117{
2118 struct mbuf *m;
2119 struct ethhdr *ehdr;
2120 struct arphdr *ahdr;
2121 static bool fWarned = false;
2122 LogFlowFunc(("ENTER: %RTnaipv4\n", dst));
2123
2124 /* An ARP request "WHO HAS 0.0.0.0" is one of the signs
2125 * that something has gone wrong in slirp. When investigating
2126 * pcap dumps it is easy to miss such warning ARP requests while
2127 * the focus is on the flow of other protocols.
2128 */
2129#ifdef DEBUG_vvl
2130 Assert((dst != INADDR_ANY));
2131 NOREF(fWarned);
2132#else
2133 if ( dst == INADDR_ANY
2134 && !fWarned)
2135 {
2136 LogRel(("NAT:ARP: \"WHO HAS INADDR_ANY\" request has been detected\n"));
2137 fWarned = true;
2138 }
2139#endif /* !DEBUG_vvl */
2140
2141 m = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
2142 if (m == NULL)
2143 {
2144 Log(("NAT: Can't alloc mbuf for ARP request\n"));
2145 LogFlowFuncLeave();
2146 return;
2147 }
2148 ehdr = mtod(m, struct ethhdr *);
2149 memset(ehdr->h_source, 0xff, ETH_ALEN);
2150 ahdr = (struct arphdr *)&ehdr[1];
2151 ahdr->ar_hrd = RT_H2N_U16_C(1);
2152 ahdr->ar_pro = RT_H2N_U16_C(ETH_P_IP);
2153 ahdr->ar_hln = ETH_ALEN;
2154 ahdr->ar_pln = 4;
2155 ahdr->ar_op = RT_H2N_U16_C(ARPOP_REQUEST);
2156 memcpy(ahdr->ar_sha, special_ethaddr, ETH_ALEN);
2157 /* we assume that this request comes from the gateway, not from DNS or TFTP */
2158 ahdr->ar_sha[5] = CTL_ALIAS;
2159 *(uint32_t *)ahdr->ar_sip = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
2160 memset(ahdr->ar_tha, 0xff, ETH_ALEN); /*broadcast*/
2161 *(uint32_t *)ahdr->ar_tip = dst;
2162 /* warning: the frame must fit within the minimal mbuf size */
2163 m->m_len = sizeof(struct arphdr) + ETH_HLEN;
2164 m->m_data += ETH_HLEN;
2165 m->m_len -= ETH_HLEN;
2166 if_encap(pData, ETH_P_ARP, m, ETH_ENCAP_URG);
2167 LogFlowFuncLeave();
2168}
2169#ifdef VBOX_WITH_DNSMAPPING_IN_HOSTRESOLVER
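/**
 * Registers a user-defined host-resolver DNS mapping, either an exact host
 * name or a pattern, for the given host IP. Entries without a name/pattern or
 * with INADDR_ANY/INADDR_BROADCAST addresses are ignored; on success the entry
 * is inserted at the head of the DNS mapping list.
 */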
2170void slirp_add_host_resolver_mapping(PNATState pData, const char *pszHostName, const char *pszHostNamePattern, uint32_t u32HostIP)
2171{
2172 LogFlowFunc(("ENTER: pszHostName:%s, pszHostNamePattern:%s u32HostIP:%RTnaipv4\n",
2173 pszHostName ? pszHostName : "(null)",
2174 pszHostNamePattern ? pszHostNamePattern : "(null)",
2175 u32HostIP));
2176 if ( ( pszHostName
2177 || pszHostNamePattern)
2178 && u32HostIP != INADDR_ANY
2179 && u32HostIP != INADDR_BROADCAST)
2180 {
2181 PDNSMAPPINGENTRY pDnsMapping = RTMemAllocZ(sizeof(DNSMAPPINGENTRY));
2182 if (!pDnsMapping)
2183 {
2184 LogFunc(("Can't allocate DNSMAPPINGENTRY\n"));
2185 LogFlowFuncLeave();
2186 return;
2187 }
2188 pDnsMapping->u32IpAddress = u32HostIP;
2189 if (pszHostName)
2190 pDnsMapping->pszCName = RTStrDup(pszHostName);
2191 else if (pszHostNamePattern)
2192 pDnsMapping->pszPattern = RTStrDup(pszHostNamePattern);
2193 if ( !pDnsMapping->pszCName
2194 && !pDnsMapping->pszPattern)
2195 {
2196 LogFunc(("Can't allocate enough room for %s\n", pszHostName ? pszHostName : pszHostNamePattern));
2197 RTMemFree(pDnsMapping);
2198 LogFlowFuncLeave();
2199 return;
2200 }
2201 LIST_INSERT_HEAD(&pData->DNSMapHead, pDnsMapping, MapList);
2202 LogRel(("NAT: user-defined mapping %s: %RTnaipv4 is registered\n",
2203 pDnsMapping->pszCName ? pDnsMapping->pszCName : pDnsMapping->pszPattern,
2204 pDnsMapping->u32IpAddress));
2205 }
2206 LogFlowFuncLeave();
2207}
2208#endif
2209
2210/* Updates the ARP cache.
2211 * @note: this is a helper function; slirp_arp_cache_update_or_add should be used instead.
2212 * @returns 0 if the entry was found and updated,
2213 * 1 if it was not found.
2214 */
2215static inline int slirp_arp_cache_update(PNATState pData, uint32_t dst, const uint8_t *mac)
2216{
2217 struct arp_cache_entry *ac;
2218 Assert(( memcmp(mac, broadcast_ethaddr, ETH_ALEN)
2219 && memcmp(mac, zerro_ethaddr, ETH_ALEN)));
2220 LIST_FOREACH(ac, &pData->arp_cache, list)
2221 {
2222 if (!memcmp(ac->ether, mac, ETH_ALEN))
2223 {
2224 ac->ip = dst;
2225 return 0;
2226 }
2227 }
2228 return 1;
2229}
2230
2231/**
2232 * Adds an entry to the ARP cache.
2233 * @note: this is a helper function; slirp_arp_cache_update_or_add should be used instead.
2234 */
2235static inline void slirp_arp_cache_add(PNATState pData, uint32_t ip, const uint8_t *ether)
2236{
2237 struct arp_cache_entry *ac = NULL;
2238 Assert(( memcmp(ether, broadcast_ethaddr, ETH_ALEN)
2239 && memcmp(ether, zerro_ethaddr, ETH_ALEN)));
2240 ac = RTMemAllocZ(sizeof(struct arp_cache_entry));
2241 if (ac == NULL)
2242 {
2243 Log(("NAT: Can't allocate arp cache entry\n"));
2244 return;
2245 }
2246 ac->ip = ip;
2247 memcpy(ac->ether, ether, ETH_ALEN);
2248 LIST_INSERT_HEAD(&pData->arp_cache, ac, list);
2249}
2250
2251/* Updates or adds an entry to the ARP cache.
2252 * @returns 0 if the entry was updated or added,
2253 * 1 if the MAC is the broadcast or zero address and the request was ignored.
2254 */
2255int slirp_arp_cache_update_or_add(PNATState pData, uint32_t dst, const uint8_t *mac)
2256{
2257 if ( !memcmp(mac, broadcast_ethaddr, ETH_ALEN)
2258 || !memcmp(mac, zerro_ethaddr, ETH_ALEN))
2259 {
2260 static bool fBroadcastEtherAddReported;
2261 if (!fBroadcastEtherAddReported)
2262 {
2263 LogRel(("NAT: Attempt to add pair [%RTmac:%RTnaipv4] to the ARP cache was ignored\n",
2264 mac, dst));
2265 fBroadcastEtherAddReported = true;
2266 }
2267 return 1;
2268 }
2269 if (slirp_arp_cache_update(pData, dst, mac))
2270 slirp_arp_cache_add(pData, dst, mac);
2271
2272 return 0;
2273}
2274
2275
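/**
 * Sets the interface MTU (and MRU, which is kept equal to it). Values outside
 * the [20, 16000) range are replaced with the default of 1500.
 */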
2276void slirp_set_mtu(PNATState pData, int mtu)
2277{
2278 if (mtu < 20 || mtu >= 16000)
2279 {
2280 LogRel(("NAT: mtu(%d) is out of range [20, 16000), mtu forcibly set to 1500\n", mtu));
2281 mtu = 1500;
2282 }
2283 /* MTU is the maximum transmission unit; the MRU is kept equal to it */
2284 if_mtu =
2285 if_mru = mtu;
2286}
2287
2288/**
2289 * Info handler.
2290 */
2291void slirp_info(PNATState pData, PCDBGFINFOHLP pHlp, const char *pszArgs)
2292{
2293 struct socket *so, *so_next;
2294 struct arp_cache_entry *ac;
2295 struct port_forward_rule *rule;
2296 NOREF(pszArgs);
2297
2298 pHlp->pfnPrintf(pHlp, "NAT parameters: MTU=%d\n", if_mtu);
2299 pHlp->pfnPrintf(pHlp, "NAT TCP ports:\n");
2300 QSOCKET_FOREACH(so, so_next, tcp)
2301 /* { */
2302 pHlp->pfnPrintf(pHlp, " %R[natsock]\n", so);
2303 }
2304
2305 pHlp->pfnPrintf(pHlp, "NAT UDP ports:\n");
2306 QSOCKET_FOREACH(so, so_next, udp)
2307 /* { */
2308 pHlp->pfnPrintf(pHlp, " %R[natsock]\n", so);
2309 }
2310
2311 pHlp->pfnPrintf(pHlp, "NAT ARP cache:\n");
2312 LIST_FOREACH(ac, &pData->arp_cache, list)
2313 {
2314 pHlp->pfnPrintf(pHlp, " %RTnaipv4 %RTmac\n", ac->ip, &ac->ether);
2315 }
2316
2317 pHlp->pfnPrintf(pHlp, "NAT rules:\n");
2318 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
2319 {
2320 pHlp->pfnPrintf(pHlp, " %s %d => %RTnaipv4:%d %c\n",
2321 rule->proto == IPPROTO_UDP ? "UDP" : "TCP",
2322 rule->host_port, rule->guest_addr.s_addr, rule->guest_port,
2323 rule->activated ? ' ' : '*');
2324 }
2325}