VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/slirp.c @ 29855

Last change on this file since 29855 was 29855, checked in by vboxsync, 15 years ago

NAT: nits and a range fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.1 KB
1/* $Id: slirp.c 29855 2010-05-28 09:49:58Z vboxsync $ */
2/** @file
3 * NAT - slirp glue.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * libslirp glue
22 *
23 * Copyright (c) 2004-2008 Fabrice Bellard
24 *
25 * Permission is hereby granted, free of charge, to any person obtaining a copy
26 * of this software and associated documentation files (the "Software"), to deal
27 * in the Software without restriction, including without limitation the rights
28 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
29 * copies of the Software, and to permit persons to whom the Software is
30 * furnished to do so, subject to the following conditions:
31 *
32 * The above copyright notice and this permission notice shall be included in
33 * all copies or substantial portions of the Software.
34 *
35 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
38 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
39 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
40 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
41 * THE SOFTWARE.
42 */
43
44#include "slirp.h"
45#ifdef RT_OS_OS2
46# include <paths.h>
47#endif
48
49#include <VBox/err.h>
50#include <VBox/pdmdrv.h>
51#include <iprt/assert.h>
52#include <iprt/file.h>
53#ifndef RT_OS_WINDOWS
54# include <sys/ioctl.h>
55# include <poll.h>
56#else
57# include <Winnls.h>
58# define _WINSOCK2API_
59# include <IPHlpApi.h>
60#endif
61#include <alias.h>
62
63#ifndef RT_OS_WINDOWS
64
65# define DO_ENGAGE_EVENT1(so, fdset, label) \
66 do { \
67 if ( so->so_poll_index != -1 \
68 && so->s == polls[so->so_poll_index].fd) \
69 { \
70 polls[so->so_poll_index].events |= N_(fdset ## _poll); \
71 break; \
72 } \
73 AssertRelease(poll_index < (nfds)); \
74 AssertRelease(poll_index >= 0 && poll_index < (nfds)); \
75 polls[poll_index].fd = (so)->s; \
76 (so)->so_poll_index = poll_index; \
77 polls[poll_index].events = N_(fdset ## _poll); \
78 polls[poll_index].revents = 0; \
79 poll_index++; \
80 } while (0)
81
82# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
83 do { \
84 if ( so->so_poll_index != -1 \
85 && so->s == polls[so->so_poll_index].fd) \
86 { \
87 polls[so->so_poll_index].events |= \
88 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
89 break; \
90 } \
91 AssertRelease(poll_index < (nfds)); \
92 polls[poll_index].fd = (so)->s; \
93 (so)->so_poll_index = poll_index; \
94 polls[poll_index].events = \
95 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
96 poll_index++; \
97 } while (0)
98
99# define DO_POLL_EVENTS(rc, error, so, events, label) do {} while (0)
100
101/*
102 * DO_CHECK_FD_SET is used when dumping events on a socket, including POLLNVAL.
103 * gcc warns about attempts to log POLLNVAL, so the construction in the last two
104 * lines catches POLLNVAL while logging and returns false on error during
105 * normal usage.
106 */
107# define DO_CHECK_FD_SET(so, events, fdset) \
108 ( ((so)->so_poll_index != -1) \
109 && ((so)->so_poll_index <= ndfs) \
110 && ((so)->s == polls[so->so_poll_index].fd) \
111 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)) \
112 && ( N_(fdset ## _poll) == POLLNVAL \
113 || !(polls[(so)->so_poll_index].revents & POLLNVAL)))
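
/*
 * Usage sketch (illustrative): after poll() returns, slirp_select_poll() below
 * tests readiness with e.g. CHECK_FD_SET(so, NetworkEvents, readfds), which on
 * Unix expands to the revents test above; on Windows the same macro name checks
 * the WSANETWORKEVENTS structure filled in by WSAEnumNetworkEvents() (see the
 * RT_OS_WINDOWS branch further down).
 */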
114
115 /* specific for Unix API */
116# define DO_UNIX_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
117 /* specific for Windows Winsock API */
118# define DO_WIN_CHECK_FD_SET(so, events, fdset) 0
119
120# ifndef RT_OS_LINUX
121# define readfds_poll (POLLRDNORM)
122# define writefds_poll (POLLWRNORM)
123# else
124# define readfds_poll (POLLIN)
125# define writefds_poll (POLLOUT)
126# endif
127# define xfds_poll (POLLPRI)
128# define closefds_poll (POLLHUP)
129# define rderr_poll (POLLERR)
130# define rdhup_poll (POLLHUP)
131# define nval_poll (POLLNVAL)
132
133# define ICMP_ENGAGE_EVENT(so, fdset) \
134 do { \
135 if (pData->icmp_socket.s != -1) \
136 DO_ENGAGE_EVENT1((so), fdset, ICMP); \
137 } while (0)
138
139#else /* RT_OS_WINDOWS */
140
141/*
142 * On Windows, we will be notified by IcmpSendEcho2() when the response arrives.
143 * So no call to WSAEventSelect necessary.
144 */
145# define ICMP_ENGAGE_EVENT(so, fdset) do {} while (0)
146
147/*
148 * On Windows we use FD_ALL_EVENTS to ensure that we don't miss any event.
149 */
150# define DO_ENGAGE_EVENT1(so, fdset1, label) \
151 do { \
152 rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS); \
153 if (rc == SOCKET_ERROR) \
154 { \
155 /* This should not happen */ \
156 error = WSAGetLastError(); \
157 LogRel(("WSAEventSelect (" #label ") error %d (so=%x, socket=%s, event=%x)\n", \
158 error, (so), (so)->s, VBOX_SOCKET_EVENT)); \
159 } \
160 } while (0); \
161 CONTINUE(label)
162
163# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
164 DO_ENGAGE_EVENT1((so), (fdset1), label)
165
166# define DO_POLL_EVENTS(rc, error, so, events, label) \
167 (rc) = WSAEnumNetworkEvents((so)->s, VBOX_SOCKET_EVENT, (events)); \
168 if ((rc) == SOCKET_ERROR) \
169 { \
170 (error) = WSAGetLastError(); \
171 LogRel(("WSAEnumNetworkEvents " #label " error %d\n", (error))); \
172 CONTINUE(label); \
173 }
174
175# define acceptds_win FD_ACCEPT
176# define acceptds_win_bit FD_ACCEPT_BIT
177# define readfds_win FD_READ
178# define readfds_win_bit FD_READ_BIT
179# define writefds_win FD_WRITE
180# define writefds_win_bit FD_WRITE_BIT
181# define xfds_win FD_OOB
182# define xfds_win_bit FD_OOB_BIT
183# define closefds_win FD_CLOSE
184# define closefds_win_bit FD_CLOSE_BIT
185
189# define DO_CHECK_FD_SET(so, events, fdset) \
190 (((events).lNetworkEvents & fdset ## _win) && ((events).iErrorCode[fdset ## _win_bit] == 0))
191
192# define DO_WIN_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
193# define DO_UNIX_CHECK_FD_SET(so, events, fdset) 1 /*specific for Unix API */
194
195#endif /* RT_OS_WINDOWS */
196
197#define TCP_ENGAGE_EVENT1(so, fdset) \
198 DO_ENGAGE_EVENT1((so), fdset, tcp)
199
200#define TCP_ENGAGE_EVENT2(so, fdset1, fdset2) \
201 DO_ENGAGE_EVENT2((so), fdset1, fdset2, tcp)
202
203#define UDP_ENGAGE_EVENT(so, fdset) \
204 DO_ENGAGE_EVENT1((so), fdset, udp)
205
206#define POLL_TCP_EVENTS(rc, error, so, events) \
207 DO_POLL_EVENTS((rc), (error), (so), (events), tcp)
208
209#define POLL_UDP_EVENTS(rc, error, so, events) \
210 DO_POLL_EVENTS((rc), (error), (so), (events), udp)
211
212#define CHECK_FD_SET(so, events, set) \
213 (DO_CHECK_FD_SET((so), (events), set))
214
215#define WIN_CHECK_FD_SET(so, events, set) \
216 (DO_WIN_CHECK_FD_SET((so), (events), set))
217
218#define UNIX_CHECK_FD_SET(so, events, set) \
219 (DO_UNIX_CHECK_FD_SET(so, events, set))
220
221/*
222 * Logging macros
223 */
224#if VBOX_WITH_DEBUG_NAT_SOCKETS
225# if defined(RT_OS_WINDOWS)
226# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
227 do { \
228 LogRel((" " #proto " %R[natsock] %R[natwinnetevents]\n", (so), (winevent))); \
229 } while (0)
230# else /* !RT_OS_WINDOWS */
231# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
232 do { \
233 LogRel((" " #proto " %R[natsock] %s %s %s er: %s, %s, %s\n", (so), \
234 CHECK_FD_SET(so, ign ,r_fdset) ? "READ":"", \
235 CHECK_FD_SET(so, ign, w_fdset) ? "WRITE":"", \
236 CHECK_FD_SET(so, ign, x_fdset) ? "OOB":"", \
237 CHECK_FD_SET(so, ign, rderr) ? "RDERR":"", \
238 CHECK_FD_SET(so, ign, rdhup) ? "RDHUP":"", \
239 CHECK_FD_SET(so, ign, nval) ? "RDNVAL":"")); \
240 } while (0)
241# endif /* !RT_OS_WINDOWS */
242#else /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
243# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) do {} while (0)
244#endif /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
245
246#define LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
247 DO_LOG_NAT_SOCK((so), proto, (winevent), r_fdset, w_fdset, x_fdset)
248
249static void activate_port_forwarding(PNATState, const uint8_t *pEther);
250
251static const uint8_t special_ethaddr[6] =
252{
253 0x52, 0x54, 0x00, 0x12, 0x35, 0x00
254};
255
256static const uint8_t broadcast_ethaddr[6] =
257{
258 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
259};
260
261const uint8_t zerro_ethaddr[6] =
262{
263 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
264};
265
266#ifdef RT_OS_WINDOWS
267static int get_dns_addr_domain(PNATState pData, bool fVerbose,
268 struct in_addr *pdns_addr,
269 const char **ppszDomain)
270{
271 ULONG flags = GAA_FLAG_INCLUDE_PREFIX; /*GAA_FLAG_INCLUDE_ALL_INTERFACES;*/ /* all interfaces registered in NDIS */
272 PIP_ADAPTER_ADDRESSES pAdapterAddr = NULL;
273 PIP_ADAPTER_ADDRESSES pAddr = NULL;
274 PIP_ADAPTER_DNS_SERVER_ADDRESS pDnsAddr = NULL;
275 ULONG size;
276 int wlen = 0;
277 char *pszSuffix;
278 struct dns_domain_entry *pDomain = NULL;
279 ULONG ret = ERROR_SUCCESS;
280
281 /* @todo add skip flags so we fetch only the required information */
282
283 /* determine size of buffer */
284 size = 0;
285 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
286 if (ret != ERROR_BUFFER_OVERFLOW)
287 {
288 LogRel(("NAT: error %lu occurred on capacity detection operation\n", ret));
289 return -1;
290 }
291 if (size == 0)
292 {
293 LogRel(("NAT: Win socket API returns non capacity\n"));
294 return -1;
295 }
296
297 pAdapterAddr = RTMemAllocZ(size);
298 if (!pAdapterAddr)
299 {
300 LogRel(("NAT: No memory available \n"));
301 return -1;
302 }
303 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
304 if (ret != ERROR_SUCCESS)
305 {
306 LogRel(("NAT: error %lu occurred on fetching adapters info\n", ret));
307 RTMemFree(pAdapterAddr);
308 return -1;
309 }
310
311 for (pAddr = pAdapterAddr; pAddr != NULL; pAddr = pAddr->Next)
312 {
313 int found;
314 if (pAddr->OperStatus != IfOperStatusUp)
315 continue;
316
317 for (pDnsAddr = pAddr->FirstDnsServerAddress; pDnsAddr != NULL; pDnsAddr = pDnsAddr->Next)
318 {
319 struct sockaddr *SockAddr = pDnsAddr->Address.lpSockaddr;
320 struct in_addr InAddr;
321 struct dns_entry *pDns;
322
323 if (SockAddr->sa_family != AF_INET)
324 continue;
325
326 InAddr = ((struct sockaddr_in *)SockAddr)->sin_addr;
327
328 /* add dns server to list */
329 pDns = RTMemAllocZ(sizeof(struct dns_entry));
330 if (!pDns)
331 {
332 LogRel(("NAT: Can't allocate buffer for DNS entry\n"));
333 RTMemFree(pAdapterAddr);
334 return VERR_NO_MEMORY;
335 }
336
337 LogRel(("NAT: adding %R[IP4] to DNS server list\n", &InAddr));
338 if ((InAddr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
339 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
340 else
341 pDns->de_addr.s_addr = InAddr.s_addr;
342
343 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
344
345 if (pAddr->DnsSuffix == NULL)
346 continue;
347
348 /* add the DNS suffix only if it isn't already in the list */
349 RTUtf16ToUtf8(pAddr->DnsSuffix, &pszSuffix);
350 if (!pszSuffix || strlen(pszSuffix) == 0)
351 {
352 RTStrFree(pszSuffix);
353 continue;
354 }
355
356 found = 0;
357 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
358 {
359 if ( pDomain->dd_pszDomain != NULL
360 && strcmp(pDomain->dd_pszDomain, pszSuffix) == 0)
361 {
362 found = 1;
363 RTStrFree(pszSuffix);
364 break;
365 }
366 }
367 if (!found)
368 {
369 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
370 if (!pDomain)
371 {
372 LogRel(("NAT: not enough memory\n"));
373 RTStrFree(pszSuffix);
374 RTMemFree(pAdapterAddr);
375 return VERR_NO_MEMORY;
376 }
377 pDomain->dd_pszDomain = pszSuffix;
378 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
379 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
380 }
381 }
382 }
383 RTMemFree(pAdapterAddr);
384 return 0;
385}
386
387#else /* !RT_OS_WINDOWS */
388
389static int RTFileGets(RTFILE File, void *pvBuf, size_t cbBufSize, size_t *pcbRead)
390{
391 size_t cbRead;
392 char bTest;
393 int rc = VERR_NO_MEMORY;
394 char *pu8Buf = (char *)pvBuf;
395 *pcbRead = 0;
396
397 while ( RT_SUCCESS(rc = RTFileRead(File, &bTest, 1, &cbRead))
398 && (pu8Buf - (char *)pvBuf) < cbBufSize)
399 {
400 if (cbRead == 0)
401 return VERR_EOF;
402
403 if (bTest == '\r' || bTest == '\n')
404 {
405 *pu8Buf = 0;
406 return VINF_SUCCESS;
407 }
408 *pu8Buf = bTest;
409 pu8Buf++;
410 (*pcbRead)++;
411 }
412 return rc;
413}
414
415static int get_dns_addr_domain(PNATState pData, bool fVerbose,
416 struct in_addr *pdns_addr,
417 const char **ppszDomain)
418{
419 char buff[512];
420 char buff2[256];
421 RTFILE f;
422 int cNameserversFound = 0;
423 int fWarnTooManyDnsServers = 0;
424 struct in_addr tmp_addr;
425 int rc;
426 size_t bytes;
427
428# ifdef RT_OS_OS2
429 /* Try various locations. */
430 char *etc = getenv("ETC");
 rc = VERR_FILE_NOT_FOUND; /* keep rc defined in case ETC isn't set */
431 if (etc)
432 {
433 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", etc);
434 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
435 }
436 if (RT_FAILURE(rc))
437 {
438 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", _PATH_ETC);
439 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
440 }
441 if (RT_FAILURE(rc))
442 {
443 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", _PATH_ETC);
444 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
445 }
446# else /* !RT_OS_OS2 */
447# ifndef DEBUG_vvl
448 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
449# else
450 char *home = getenv("HOME");
451 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", home);
452 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
453 if (RT_SUCCESS(rc))
454 {
455 Log(("NAT: DNS we're using %s\n", buff));
456 }
457 else
458 {
459 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
460 Log(("NAT: DNS we're using %s\n", buff));
461 }
462# endif
463# endif /* !RT_OS_OS2 */
464 if (RT_FAILURE(rc))
465 return -1;
466
467 if (ppszDomain)
468 *ppszDomain = NULL;
469
470 Log(("NAT: DNS Servers:\n"));
471 while ( RT_SUCCESS(rc = RTFileGets(f, buff, sizeof(buff), &bytes))
472 && rc != VERR_EOF)
473 {
474 struct dns_entry *pDns = NULL;
475 if ( cNameserversFound == 4
476 && fWarnTooManyDnsServers == 0
477 && sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1)
478 {
479 fWarnTooManyDnsServers = 1;
480 LogRel(("NAT: too many nameservers registered.\n"));
481 }
482 if ( sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1
483 && cNameserversFound < 4) /* Unix doesn't accept more than 4 name servers*/
484 {
485 if (!inet_aton(buff2, &tmp_addr))
486 continue;
487
488 /* localhost mask */
489 pDns = RTMemAllocZ(sizeof (struct dns_entry));
490 if (!pDns)
491 {
492 LogRel(("can't alloc memory for DNS entry\n"));
493 return -1;
494 }
495
496 /* check */
497 pDns->de_addr.s_addr = tmp_addr.s_addr;
498 if ((pDns->de_addr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
499 {
500 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
501 }
502 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
503 cNameserversFound++;
504 }
505 if ((!strncmp(buff, "domain", 6) || !strncmp(buff, "search", 6)))
506 {
507 char *tok;
508 char *saveptr;
509 struct dns_domain_entry *pDomain = NULL;
510 int fFoundDomain = 0;
511 tok = strtok_r(&buff[6], " \t\n", &saveptr);
512 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
513 {
514 if ( tok != NULL
515 && strcmp(tok, pDomain->dd_pszDomain) == 0)
516 {
517 fFoundDomain = 1;
518 break;
519 }
520 }
521 if (tok != NULL && !fFoundDomain)
522 {
523 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
524 if (!pDomain)
525 {
526 LogRel(("NAT: not enought memory to add domain list\n"));
527 return VERR_NO_MEMORY;
528 }
529 pDomain->dd_pszDomain = RTStrDup(tok);
530 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
531 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
532 }
533 }
534 }
535 RTFileClose(f);
536 if (!cNameserversFound)
537 return -1;
538 return 0;
539}
540
541#endif /* !RT_OS_WINDOWS */
542
543int slirp_init_dns_list(PNATState pData)
544{
545 TAILQ_INIT(&pData->pDnsList);
546 LIST_INIT(&pData->pDomainList);
547 return get_dns_addr_domain(pData, true, NULL, NULL);
548}
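
/*
 * Note: slirp_init() below calls this only when the host resolver is not in
 * use; the platform-specific get_dns_addr_domain() above then fills pDnsList
 * and pDomainList, either via the GetAdaptersAddresses() API on Windows or by
 * parsing resolv.conf on the other hosts.
 */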
549
550void slirp_release_dns_list(PNATState pData)
551{
552 struct dns_entry *pDns = NULL;
553 struct dns_domain_entry *pDomain = NULL;
554
555 while (!TAILQ_EMPTY(&pData->pDnsList))
556 {
557 pDns = TAILQ_FIRST(&pData->pDnsList);
558 TAILQ_REMOVE(&pData->pDnsList, pDns, de_list);
559 RTMemFree(pDns);
560 }
561
562 while (!LIST_EMPTY(&pData->pDomainList))
563 {
564 pDomain = LIST_FIRST(&pData->pDomainList);
565 LIST_REMOVE(pDomain, dd_list);
566 if (pDomain->dd_pszDomain != NULL)
567 RTStrFree(pDomain->dd_pszDomain);
568 RTMemFree(pDomain);
569 }
570}
571
572int get_dns_addr(PNATState pData, struct in_addr *pdns_addr)
573{
574 return get_dns_addr_domain(pData, false, pdns_addr, NULL);
575}
576
577int slirp_init(PNATState *ppData, uint32_t u32NetAddr, uint32_t u32Netmask,
578 bool fPassDomain, bool fUseHostResolver, int i32AliasMode, void *pvUser)
579{
580 int fNATfailed = 0;
581 int rc;
582 PNATState pData = RTMemAllocZ(sizeof(NATState));
583 *ppData = pData;
584 if (!pData)
585 return VERR_NO_MEMORY;
586 if (u32Netmask & 0x1f)
587 /* CTL is x.x.x.15, bootp passes up to 16 IPs (15..31) */
588 return VERR_INVALID_PARAMETER;
589 pData->fPassDomain = !fUseHostResolver ? fPassDomain : false;
590 pData->fUseHostResolver = fUseHostResolver;
591 pData->pvUser = pvUser;
592 pData->netmask = u32Netmask;
593
594 /* sockets & TCP defaults */
595 pData->socket_rcv = 64 * _1K;
596 pData->socket_snd = 64 * _1K;
597 tcp_sndspace = 64 * _1K;
598 tcp_rcvspace = 64 * _1K;
599
600#ifdef RT_OS_WINDOWS
601 {
602 WSADATA Data;
603 WSAStartup(MAKEWORD(2, 0), &Data);
604 }
605 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
606#endif
607#ifdef VBOX_WITH_SLIRP_MT
608 QSOCKET_LOCK_CREATE(tcb);
609 QSOCKET_LOCK_CREATE(udb);
610 rc = RTReqCreateQueue(&pData->pReqQueue);
611 AssertReleaseRC(rc);
612#endif
613
614 link_up = 1;
615
616 rc = bootp_dhcp_init(pData);
617 if (rc != 0)
618 {
619 LogRel(("NAT: DHCP server initialization was failed\n"));
620 return VINF_NAT_DNS;
621 }
622 debug_init();
623 if_init(pData);
624 ip_init(pData);
625 icmp_init(pData);
626
627 /* Initialise mbufs *after* setting the MTU */
628#ifndef VBOX_WITH_SLIRP_BSD_MBUF
629 m_init(pData);
630#else
631 mbuf_init(pData);
632#endif
633
634 pData->special_addr.s_addr = u32NetAddr;
635 pData->slirp_ethaddr = &special_ethaddr[0];
636 alias_addr.s_addr = pData->special_addr.s_addr | RT_H2N_U32_C(CTL_ALIAS);
637 /* @todo: add the ability to configure this stuff */
638
639 /* set default addresses */
640 inet_aton("127.0.0.1", &loopback_addr);
641 if (!pData->fUseHostResolver)
642 {
643 if (slirp_init_dns_list(pData) < 0)
644 fNATfailed = 1;
645
646 dnsproxy_init(pData);
647 }
648 if (i32AliasMode & ~(PKT_ALIAS_LOG|PKT_ALIAS_SAME_PORTS|PKT_ALIAS_PROXY_ONLY))
649 {
650 LogRel(("NAT: alias mode %x is ignored\n", i32AliasMode));
651 i32AliasMode = 0;
652 }
653 pData->i32AliasMode = i32AliasMode;
654 getouraddr(pData);
655 {
656 int flags = 0;
657 struct in_addr proxy_addr;
658 pData->proxy_alias = LibAliasInit(pData, NULL);
659 if (pData->proxy_alias == NULL)
660 {
661 LogRel(("NAT: LibAlias default rule wasn't initialized\n"));
662 AssertMsgFailed(("NAT: LibAlias default rule wasn't initialized\n"));
663 }
664 flags = LibAliasSetMode(pData->proxy_alias, 0, 0);
665#ifndef NO_FW_PUNCH
666 flags |= PKT_ALIAS_PUNCH_FW;
667#endif
668 flags |= pData->i32AliasMode; /* do transparent proxying */
669 flags = LibAliasSetMode(pData->proxy_alias, flags, ~0);
670 proxy_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
671 LibAliasSetAddress(pData->proxy_alias, proxy_addr);
672 ftp_alias_load(pData);
673 nbt_alias_load(pData);
674 if (pData->fUseHostResolver)
675 dns_alias_load(pData);
676 }
677 return fNATfailed ? VINF_NAT_DNS : VINF_SUCCESS;
678}
679
680/**
681 * Register statistics.
682 */
683void slirp_register_statistics(PNATState pData, PPDMDRVINS pDrvIns)
684{
685#ifdef VBOX_WITH_STATISTICS
686# define PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
687# define COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
688# include "counters.h"
689# undef COUNTER
690/** @todo register statistics for the variables dumped by:
691 * ipstats(pData); tcpstats(pData); udpstats(pData); icmpstats(pData);
692 * mbufstats(pData); sockstats(pData); */
693#endif /* VBOX_WITH_STATISTICS */
694}
695
696/**
697 * Deregister statistics.
698 */
699void slirp_deregister_statistics(PNATState pData, PPDMDRVINS pDrvIns)
700{
701 if (pData == NULL)
702 return;
703#ifdef VBOX_WITH_STATISTICS
704# define PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
705# define COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
706# include "counters.h"
707#endif /* VBOX_WITH_STATISTICS */
708}
709
710/**
711 * Marks the link as up, making it possible to establish new connections.
712 */
713void slirp_link_up(PNATState pData)
714{
715 struct arp_cache_entry *ac;
716 link_up = 1;
717
718 if (LIST_EMPTY(&pData->arp_cache))
719 return;
720
721 LIST_FOREACH(ac, &pData->arp_cache, list)
722 {
723 activate_port_forwarding(pData, ac->ether);
724 }
725}
726
727/**
728 * Marks the link as down and cleans up the current connections.
729 */
730void slirp_link_down(PNATState pData)
731{
732 struct socket *so;
733 struct port_forward_rule *rule;
734
735 while ((so = tcb.so_next) != &tcb)
736 {
737 if (so->so_state & SS_NOFDREF || so->s == -1)
738 sofree(pData, so);
739 else
740 tcp_drop(pData, sototcpcb(so), 0);
741 }
742
743 while ((so = udb.so_next) != &udb)
744 udp_detach(pData, so);
745
746 /*
747 * Clear the active state of port-forwarding rules to force
748 * re-setup on restoration of communications.
749 */
750 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
751 {
752 rule->activated = 0;
753 }
754 pData->cRedirectionsActive = 0;
755
756 link_up = 0;
757}
758
759/**
760 * Terminates the slirp component.
761 */
762void slirp_term(PNATState pData)
763{
764 if (pData == NULL)
765 return;
766#ifdef RT_OS_WINDOWS
767 pData->pfIcmpCloseHandle(pData->icmp_socket.sh);
768 FreeLibrary(pData->hmIcmpLibrary);
769 RTMemFree(pData->pvIcmpBuffer);
770#else
771 closesocket(pData->icmp_socket.s);
772#endif
773
774 slirp_link_down(pData);
775 slirp_release_dns_list(pData);
776 ftp_alias_unload(pData);
777 nbt_alias_unload(pData);
778 if (pData->fUseHostResolver)
779 dns_alias_unload(pData);
780 while (!LIST_EMPTY(&instancehead))
781 {
782 struct libalias *la = LIST_FIRST(&instancehead);
783 /* libalias do all clean up */
784 LibAliasUninit(la);
785 }
786 while (!LIST_EMPTY(&pData->arp_cache))
787 {
788 struct arp_cache_entry *ac = LIST_FIRST(&pData->arp_cache);
789 LIST_REMOVE(ac, list);
790 RTMemFree(ac);
791 }
792 bootp_dhcp_fini(pData);
793 m_fini(pData);
794#ifdef RT_OS_WINDOWS
795 WSACleanup();
796#endif
797#ifdef LOG_ENABLED
798 Log(("\n"
799 "NAT statistics\n"
800 "--------------\n"
801 "\n"));
802 ipstats(pData);
803 tcpstats(pData);
804 udpstats(pData);
805 icmpstats(pData);
806 mbufstats(pData);
807 sockstats(pData);
808 Log(("\n"
809 "\n"
810 "\n"));
811#endif
812 RTMemFree(pData);
813}
814
815
816#define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
817#define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
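
/*
 * Reading the two macros above: a socket "can f-send" when it is fully
 * connected and the send direction has not been shut down (SS_FCANTSENDMORE
 * clear), and "can f-rcv" when it is connected and the receive direction is
 * still open (SS_FCANTRCVMORE clear).
 */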
818
819/*
820 * curtime kept to an accuracy of 1ms
821 */
822static void updtime(PNATState pData)
823{
824#ifdef RT_OS_WINDOWS
825 struct _timeb tb;
826
827 _ftime(&tb);
828 curtime = (u_int)tb.time * (u_int)1000;
829 curtime += (u_int)tb.millitm;
830#else
831 gettimeofday(&tt, 0);
832
833 curtime = (u_int)tt.tv_sec * (u_int)1000;
834 curtime += (u_int)tt.tv_usec / (u_int)1000;
835
836 if ((tt.tv_usec % 1000) >= 500)
837 curtime++;
838#endif
839}
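
/*
 * Note: assuming curtime is a 32-bit unsigned millisecond counter (as the
 * casts above suggest), it wraps after roughly 49.7 days; the users in this
 * file only look at differences such as (curtime - time_fasttimo), which stay
 * correct across a wrap.
 */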
840
841#ifdef RT_OS_WINDOWS
842void slirp_select_fill(PNATState pData, int *pnfds)
843#else /* RT_OS_WINDOWS */
844void slirp_select_fill(PNATState pData, int *pnfds, struct pollfd *polls)
845#endif /* !RT_OS_WINDOWS */
846{
847 struct socket *so, *so_next;
848 int nfds;
849#if defined(RT_OS_WINDOWS)
850 int rc;
851 int error;
852#else
853 int poll_index = 0;
854#endif
855 int i;
856
857 STAM_PROFILE_START(&pData->StatFill, a);
858
859 nfds = *pnfds;
860
861 /*
862 * First, TCP sockets
863 */
864 do_slowtimo = 0;
865 if (!link_up)
866 goto done;
867
868 /*
869 * *_slowtimo needs calling if there are IP fragments
870 * in the fragment queue, or there are TCP connections active
871 */
872 /* XXX:
873 * triggering of fragment expiration should be the same, but use the new macros
874 */
875 do_slowtimo = (tcb.so_next != &tcb);
876 if (!do_slowtimo)
877 {
878 for (i = 0; i < IPREASS_NHASH; i++)
879 {
880 if (!TAILQ_EMPTY(&ipq[i]))
881 {
882 do_slowtimo = 1;
883 break;
884 }
885 }
886 }
887 /* always add the ICMP socket */
888#ifndef RT_OS_WINDOWS
889 pData->icmp_socket.so_poll_index = -1;
890#endif
891 ICMP_ENGAGE_EVENT(&pData->icmp_socket, readfds);
892
893 STAM_COUNTER_RESET(&pData->StatTCP);
894 STAM_COUNTER_RESET(&pData->StatTCPHot);
895
896 QSOCKET_FOREACH(so, so_next, tcp)
897 /* { */
898#if !defined(RT_OS_WINDOWS)
899 so->so_poll_index = -1;
900#endif
901#ifndef VBOX_WITH_SLIRP_BSD_MBUF
902 if (pData->fmbuf_water_line == 1)
903 {
904 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
905 {
906 pData->fmbuf_water_warn_sent = 0;
907 pData->fmbuf_water_line = 0;
908 }
909# ifndef RT_OS_WINDOWS
910 poll_index = 0;
911# endif
912 goto done;
913 }
914#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
915 STAM_COUNTER_INC(&pData->StatTCP);
916
917 /*
918 * See if we need a tcp_fasttimo
919 */
920 if ( time_fasttimo == 0
921 && so->so_tcpcb != NULL
922 && so->so_tcpcb->t_flags & TF_DELACK)
923 {
924 time_fasttimo = curtime; /* Flag when we want a fasttimo */
925 }
926
927 /*
928 * NOFDREF can include sockets still connecting to the local host,
929 * newly socreate()'d sockets, etc. We don't want to select these.
930 */
931 if (so->so_state & SS_NOFDREF || so->s == -1)
932 CONTINUE(tcp);
933
934 /*
935 * Set for reading sockets which are accepting
936 */
937 if (so->so_state & SS_FACCEPTCONN)
938 {
939 STAM_COUNTER_INC(&pData->StatTCPHot);
940 TCP_ENGAGE_EVENT1(so, readfds);
941 CONTINUE(tcp);
942 }
943
944 /*
945 * Set for writing sockets which are connecting
946 */
947 if (so->so_state & SS_ISFCONNECTING)
948 {
949 Log2(("connecting %R[natsock] engaged\n",so));
950 STAM_COUNTER_INC(&pData->StatTCPHot);
951 TCP_ENGAGE_EVENT1(so, writefds);
952 }
953
954 /*
955 * Set for writing if we are connected, can send more, and
956 * we have something to send
957 */
958 if (CONN_CANFSEND(so) && so->so_rcv.sb_cc)
959 {
960 STAM_COUNTER_INC(&pData->StatTCPHot);
961 TCP_ENGAGE_EVENT1(so, writefds);
962 }
963
964 /*
965 * Set for reading (and urgent data) if we are connected, can
966 * receive more, and we have room for it XXX /2 ?
967 */
968 if (CONN_CANFRCV(so) && (so->so_snd.sb_cc < (so->so_snd.sb_datalen/2)))
969 {
970 STAM_COUNTER_INC(&pData->StatTCPHot);
971 TCP_ENGAGE_EVENT2(so, readfds, xfds);
972 }
973 LOOP_LABEL(tcp, so, so_next);
974 }
975
976 /*
977 * UDP sockets
978 */
979 STAM_COUNTER_RESET(&pData->StatUDP);
980 STAM_COUNTER_RESET(&pData->StatUDPHot);
981
982 QSOCKET_FOREACH(so, so_next, udp)
983 /* { */
984
985#ifndef VBOX_WITH_SLIRP_BSD_MBUF
986 if (pData->fmbuf_water_line == 1)
987 {
988 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
989 {
990 pData->fmbuf_water_line = 0;
991 pData->fmbuf_water_warn_sent = 0;
992 }
993# ifndef RT_OS_WINDOWS
994 poll_index = 0;
995# endif
996 goto done;
997 }
998#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
999 STAM_COUNTER_INC(&pData->StatUDP);
1000#if !defined(RT_OS_WINDOWS)
1001 so->so_poll_index = -1;
1002#endif
1003
1004 /*
1005 * See if it's timed out
1006 */
1007 if (so->so_expire)
1008 {
1009 if (so->so_expire <= curtime)
1010 {
1011 Log2(("NAT: %R[natsock] expired\n", so));
1012 if (so->so_timeout != NULL)
1013 {
1014 so->so_timeout(pData, so, so->so_timeout_arg);
1015 }
1016#ifdef VBOX_WITH_SLIRP_MT
1017 /* we need so_next to continue our loop */
1018 so_next = so->so_next;
1019#endif
1020 UDP_DETACH(pData, so, so_next);
1021 CONTINUE_NO_UNLOCK(udp);
1022 }
1023 else
1024 {
1025 do_slowtimo = 1; /* Let socket expire */
1026 }
1027 }
1028
1029 /*
1030 * When UDP packets are received from over the link, they're
1031 * sendto()'d straight away, so no need for setting for writing
1032 * Limit the number of packets queued by this session to 4.
1033 * Note that even though we try and limit this to 4 packets,
1034 * the session could have more queued if the packets needed
1035 * to be fragmented.
1036 *
1037 * (XXX <= 4 ?)
1038 */
1039 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4)
1040 {
1041 STAM_COUNTER_INC(&pData->StatUDPHot);
1042 UDP_ENGAGE_EVENT(so, readfds);
1043 }
1044 LOOP_LABEL(udp, so, so_next);
1045 }
1046done:
1047
1048#if defined(RT_OS_WINDOWS)
1049 *pnfds = VBOX_EVENT_COUNT;
1050#else /* RT_OS_WINDOWS */
1051 AssertRelease(poll_index <= *pnfds);
1052 *pnfds = poll_index;
1053#endif /* !RT_OS_WINDOWS */
1054
1055 STAM_PROFILE_STOP(&pData->StatFill, a);
1056}
1057
1058#if defined(RT_OS_WINDOWS)
1059void slirp_select_poll(PNATState pData, int fTimeout, int fIcmp)
1060#else /* RT_OS_WINDOWS */
1061void slirp_select_poll(PNATState pData, struct pollfd *polls, int ndfs)
1062#endif /* !RT_OS_WINDOWS */
1063{
1064 struct socket *so, *so_next;
1065 int ret;
1066#if defined(RT_OS_WINDOWS)
1067 WSANETWORKEVENTS NetworkEvents;
1068 int rc;
1069 int error;
1070#else
1071 int poll_index = 0;
1072#endif
1073
1074 STAM_PROFILE_START(&pData->StatPoll, a);
1075
1076 /* Update time */
1077 updtime(pData);
1078
1079 /*
1080 * See if anything has timed out
1081 */
1082 if (link_up)
1083 {
1084 if (time_fasttimo && ((curtime - time_fasttimo) >= 2))
1085 {
1086 STAM_PROFILE_START(&pData->StatFastTimer, b);
1087 tcp_fasttimo(pData);
1088 time_fasttimo = 0;
1089 STAM_PROFILE_STOP(&pData->StatFastTimer, b);
1090 }
1091 if (do_slowtimo && ((curtime - last_slowtimo) >= 499))
1092 {
1093 STAM_PROFILE_START(&pData->StatSlowTimer, c);
1094 ip_slowtimo(pData);
1095 tcp_slowtimo(pData);
1096 last_slowtimo = curtime;
1097 STAM_PROFILE_STOP(&pData->StatSlowTimer, c);
1098 }
1099 }
1100#if defined(RT_OS_WINDOWS)
1101 if (fTimeout)
1102 return; /* only timer update */
1103#endif
1104
1105 /*
1106 * Check sockets
1107 */
1108 if (!link_up)
1109 goto done;
1110#if defined(RT_OS_WINDOWS)
1111 /* XXX: before renaming, please check the fIcmp
1112 * define in slirp_state.h
1113 */
1114 if (fIcmp)
1115 sorecvfrom(pData, &pData->icmp_socket);
1116#else
1117 if ( (pData->icmp_socket.s != -1)
1118 && CHECK_FD_SET(&pData->icmp_socket, ignored, readfds))
1119 sorecvfrom(pData, &pData->icmp_socket);
1120#endif
1121 /*
1122 * Check TCP sockets
1123 */
1124 QSOCKET_FOREACH(so, so_next, tcp)
1125 /* { */
1126#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1127 if (pData->fmbuf_water_line == 1)
1128 {
1129 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
1130 {
1131 pData->fmbuf_water_line = 0;
1132 pData->fmbuf_water_warn_sent = 0;
1133 }
1134 goto done;
1135 }
1136#endif
1137
1138#ifdef VBOX_WITH_SLIRP_MT
1139 if ( so->so_state & SS_NOFDREF
1140 && so->so_deleted == 1)
1141 {
1142 struct socket *son, *sop = NULL;
1143 QSOCKET_LOCK(tcb);
1144 if (so->so_next != NULL)
1145 {
1146 if (so->so_next != &tcb)
1147 SOCKET_LOCK(so->so_next);
1148 son = so->so_next;
1149 }
1150 if ( so->so_prev != &tcb
1151 && so->so_prev != NULL)
1152 {
1153 SOCKET_LOCK(so->so_prev);
1154 sop = so->so_prev;
1155 }
1156 QSOCKET_UNLOCK(tcb);
1157 remque(pData, so);
1158 NSOCK_DEC();
1159 SOCKET_UNLOCK(so);
1160 SOCKET_LOCK_DESTROY(so);
1161 RTMemFree(so);
1162 so_next = son;
1163 if (sop != NULL)
1164 SOCKET_UNLOCK(sop);
1165 CONTINUE_NO_UNLOCK(tcp);
1166 }
1167#endif
1168 /*
1169 * FD_ISSET is meaningless on these sockets
1170 * (and they can crash the program)
1171 */
1172 if (so->so_state & SS_NOFDREF || so->s == -1)
1173 CONTINUE(tcp);
1174
1175 POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);
1176
1177 LOG_NAT_SOCK(so, TCP, &NetworkEvents, readfds, writefds, xfds);
1178
1179
1180 /*
1181 * Check for URG data
1182 * This will soread as well, so no need to
1183 * test for readfds below if this succeeds
1184 */
1185
1186 /* out-of-band data */
1187 if ( CHECK_FD_SET(so, NetworkEvents, xfds)
1188#ifdef RT_OS_DARWIN
1189 /* Darwin (and probably the other BSD hosts) generates a POLLPRI|POLLHUP event on receiving TCP
1190 * segments with the ACK|URG|FIN flags set; on other Unix hosts this combination doesn't reach this branch.
1191 */
1192 && !CHECK_FD_SET(so, NetworkEvents, closefds)
1193#endif
1194 )
1195 {
1196 sorecvoob(pData, so);
1197 }
1198
1199 /*
1200 * Check sockets for reading
1201 */
1202 else if ( CHECK_FD_SET(so, NetworkEvents, readfds)
1203 || WIN_CHECK_FD_SET(so, NetworkEvents, acceptds))
1204 {
1205 /*
1206 * Check for incoming connections
1207 */
1208 if (so->so_state & SS_FACCEPTCONN)
1209 {
1210 TCP_CONNECT(pData, so);
1211 if (!CHECK_FD_SET(so, NetworkEvents, closefds))
1212 CONTINUE(tcp);
1213 }
1214
1215 ret = soread(pData, so);
1216 /* Output it if we read something */
1217 if (RT_LIKELY(ret > 0))
1218 TCP_OUTPUT(pData, sototcpcb(so));
1219 }
1220
1221 /*
1222 * Check for FD_CLOSE events.
1223 * in some cases once FD_CLOSE engaged on socket it could be flashed latter (for some reasons)
1224 */
1225 if ( CHECK_FD_SET(so, NetworkEvents, closefds)
1226 || (so->so_close == 1))
1227 {
1228 /*
1229 * drain the socket
1230 */
1231 for (;;)
1232 {
1233 ret = soread(pData, so);
1234 if (ret > 0)
1235 TCP_OUTPUT(pData, sototcpcb(so));
1236 else
1237 {
1238 Log2(("%R[natsock] errno %d:%s\n", so, errno, strerror(errno)));
1239 break;
1240 }
1241 }
1242 /* mark the socket for termination _after_ it was drained */
1243 so->so_close = 1;
1244 CONTINUE(tcp);
1245 }
1246
1247 /*
1248 * Check sockets for writing
1249 */
1250 if (CHECK_FD_SET(so, NetworkEvents, writefds))
1251 {
1252 /*
1253 * Check for non-blocking, still-connecting sockets
1254 */
1255 if (so->so_state & SS_ISFCONNECTING)
1256 {
1257 Log2(("connecting %R[natsock] catched\n", so));
1258 /* Connected */
1259 so->so_state &= ~SS_ISFCONNECTING;
1260
1261 /*
1262 * This should be probably guarded by PROBE_CONN too. Anyway,
1263 * we disable it on OS/2 because the below send call returns
1264 * EFAULT which causes the opened TCP socket to close right
1265 * after it has been opened and connected.
1266 */
1267#ifndef RT_OS_OS2
1268 ret = send(so->s, (const char *)&ret, 0, 0);
1269 if (ret < 0)
1270 {
1271 /* XXXXX Must fix, zero bytes is a NOP */
1272 if ( errno == EAGAIN
1273 || errno == EWOULDBLOCK
1274 || errno == EINPROGRESS
1275 || errno == ENOTCONN)
1276 CONTINUE(tcp);
1277
1278 /* else failed */
1279 so->so_state = SS_NOFDREF;
1280 }
1281 /* else so->so_state &= ~SS_ISFCONNECTING; */
1282#endif
1283
1284 /*
1285 * Continue tcp_input
1286 */
1287 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1288 /* continue; */
1289 }
1290 else
1291 SOWRITE(ret, pData, so);
1292 /*
1293 * XXX If we wrote something (a lot), there could be the need
1294 * for a window update. In the worst case, the remote will send
1295 * a window probe to get things going again.
1296 */
1297 }
1298
1299 /*
1300 * Probe a still-connecting, non-blocking socket
1301 * to check if it's still alive
1302 */
1303#ifdef PROBE_CONN
1304 if (so->so_state & SS_ISFCONNECTING)
1305 {
1306 ret = recv(so->s, (char *)&ret, 0, 0);
1307
1308 if (ret < 0)
1309 {
1310 /* XXX */
1311 if ( errno == EAGAIN
1312 || errno == EWOULDBLOCK
1313 || errno == EINPROGRESS
1314 || errno == ENOTCONN)
1315 {
1316 CONTINUE(tcp); /* Still connecting, continue */
1317 }
1318
1319 /* else failed */
1320 so->so_state = SS_NOFDREF;
1321
1322 /* tcp_input will take care of it */
1323 }
1324 else
1325 {
1326 ret = send(so->s, &ret, 0, 0);
1327 if (ret < 0)
1328 {
1329 /* XXX */
1330 if ( errno == EAGAIN
1331 || errno == EWOULDBLOCK
1332 || errno == EINPROGRESS
1333 || errno == ENOTCONN)
1334 {
1335 CONTINUE(tcp);
1336 }
1337 /* else failed */
1338 so->so_state = SS_NOFDREF;
1339 }
1340 else
1341 so->so_state &= ~SS_ISFCONNECTING;
1342
1343 }
1344 TCP_INPUT((struct mbuf *)NULL, sizeof(struct ip),so);
1345 } /* SS_ISFCONNECTING */
1346#endif
1347 LOOP_LABEL(tcp, so, so_next);
1348 }
1349
1350 /*
1351 * Now UDP sockets.
1352 * Incoming packets are sent straight away, they're not buffered.
1353 * Incoming UDP data isn't buffered either.
1354 */
1355 QSOCKET_FOREACH(so, so_next, udp)
1356 /* { */
1357#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1358 if (pData->fmbuf_water_line == 1)
1359 {
1360 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
1361 {
1362 pData->fmbuf_water_line = 0;
1363 pData->fmbuf_water_warn_sent = 0;
1364 }
1365 goto done;
1366 }
1367#endif
1368#ifdef VBOX_WITH_SLIRP_MT
1369 if ( so->so_state & SS_NOFDREF
1370 && so->so_deleted == 1)
1371 {
1372 struct socket *son, *sop = NULL;
1373 QSOCKET_LOCK(udb);
1374 if (so->so_next != NULL)
1375 {
1376 if (so->so_next != &udb)
1377 SOCKET_LOCK(so->so_next);
1378 son = so->so_next;
1379 }
1380 if ( so->so_prev != &udb
1381 && so->so_prev != NULL)
1382 {
1383 SOCKET_LOCK(so->so_prev);
1384 sop = so->so_prev;
1385 }
1386 QSOCKET_UNLOCK(udb);
1387 remque(pData, so);
1388 NSOCK_DEC();
1389 SOCKET_UNLOCK(so);
1390 SOCKET_LOCK_DESTROY(so);
1391 RTMemFree(so);
1392 so_next = son;
1393 if (sop != NULL)
1394 SOCKET_UNLOCK(sop);
1395 CONTINUE_NO_UNLOCK(udp);
1396 }
1397#endif
1398 POLL_UDP_EVENTS(rc, error, so, &NetworkEvents);
1399
1400 LOG_NAT_SOCK(so, UDP, &NetworkEvents, readfds, writefds, xfds);
1401
1402 if (so->s != -1 && CHECK_FD_SET(so, NetworkEvents, readfds))
1403 {
1404 SORECVFROM(pData, so);
1405 }
1406 LOOP_LABEL(udp, so, so_next);
1407 }
1408
1409done:
1410#if 0
1411 /*
1412 * See if we can start outputting
1413 */
1414 if (if_queued && link_up)
1415 if_start(pData);
1416#endif
1417
1418 STAM_PROFILE_STOP(&pData->StatPoll, a);
1419}
1420
1421
1422struct arphdr
1423{
1424 unsigned short ar_hrd; /* format of hardware address */
1425 unsigned short ar_pro; /* format of protocol address */
1426 unsigned char ar_hln; /* length of hardware address */
1427 unsigned char ar_pln; /* length of protocol address */
1428 unsigned short ar_op; /* ARP opcode (command) */
1429
1430 /*
1431 * Ethernet looks like this : This bit is variable sized however...
1432 */
1433 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
1434 unsigned char ar_sip[4]; /* sender IP address */
1435 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
1436 unsigned char ar_tip[4]; /* target IP address */
1437};
1438AssertCompileSize(struct arphdr, 28);
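
/*
 * Wire-layout recap for the structure above (Ethernet/IPv4 ARP, 28 bytes):
 * ar_hrd(2) ar_pro(2) ar_hln(1) ar_pln(1) ar_op(2) ar_sha(6) ar_sip(4)
 * ar_tha(6) ar_tip(4), which is exactly what the AssertCompileSize checks.
 */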
1439
1440static void arp_input(PNATState pData, struct mbuf *m)
1441{
1442 struct ethhdr *eh;
1443 struct ethhdr *reh;
1444 struct arphdr *ah;
1445 struct arphdr *rah;
1446 int ar_op;
1447 uint32_t htip;
1448 uint32_t tip;
1449 struct mbuf *mr;
1450 eh = mtod(m, struct ethhdr *);
1451 ah = (struct arphdr *)&eh[1];
1452 htip = RT_N2H_U32(*(uint32_t*)ah->ar_tip);
1453 tip = *(uint32_t*)ah->ar_tip;
1454
1455 ar_op = RT_N2H_U16(ah->ar_op);
1456
1457 switch (ar_op)
1458 {
1459 case ARPOP_REQUEST:
1460#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1461 mr = m_get(pData);
1462
1463 reh = mtod(mr, struct ethhdr *);
1464 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1465 Log4(("NAT: arp:%R[ether]->%R[ether]\n",
1466 reh->h_source, reh->h_dest));
1467 Log4(("NAT: arp: %R[IP4]\n", &tip));
1468
1469 mr->m_data += if_maxlinkhdr;
1470 mr->m_len = sizeof(struct arphdr);
1471 rah = mtod(mr, struct arphdr *);
1472#else
1473 mr = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1474 if (mr == NULL)
1475 return;
1476 reh = mtod(mr, struct ethhdr *);
1477 mr->m_data += ETH_HLEN;
1478 rah = mtod(mr, struct arphdr *);
1479 mr->m_len = sizeof(struct arphdr);
1480 Assert(mr);
1481 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1482#endif
1483#ifdef VBOX_WITH_NAT_SERVICE
1484 if (tip == pData->special_addr.s_addr)
1485 goto arp_ok;
1486#endif
1487 if ((htip & pData->netmask) == RT_N2H_U32(pData->special_addr.s_addr))
1488 {
1489 if ( CTL_CHECK(htip, CTL_DNS)
1490 || CTL_CHECK(htip, CTL_ALIAS)
1491 || CTL_CHECK(htip, CTL_TFTP))
1492 goto arp_ok;
1493 m_freem(pData, m);
1494 m_freem(pData, mr);
1495 return;
1496
1497 arp_ok:
1498 rah->ar_hrd = RT_H2N_U16_C(1);
1499 rah->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1500 rah->ar_hln = ETH_ALEN;
1501 rah->ar_pln = 4;
1502 rah->ar_op = RT_H2N_U16_C(ARPOP_REPLY);
1503 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN);
1504
1505 switch (htip & ~pData->netmask)
1506 {
1507 case CTL_DNS:
1508 case CTL_ALIAS:
1509 rah->ar_sha[5] = (uint8_t)(htip & ~pData->netmask);
1510 break;
1511 default:;
1512 }
1513
1514 memcpy(rah->ar_sip, ah->ar_tip, 4);
1515 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
1516 memcpy(rah->ar_tip, ah->ar_sip, 4);
1517 if_encap(pData, ETH_P_ARP, mr, ETH_ENCAP_URG);
1518 m_freem(pData, m);
1519 }
1520 /* Gratuitous ARP */
1521 if ( *(uint32_t *)ah->ar_sip == *(uint32_t *)ah->ar_tip
1522 && memcmp(ah->ar_tha, broadcast_ethaddr, ETH_ALEN) == 0
1523 && memcmp(eh->h_dest, broadcast_ethaddr, ETH_ALEN) == 0)
1524 {
1525 /* We've received an announcement about an address assignment;
1526 * let's update the ARP cache.
1527 */
1528 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]) == 0)
1529 {
1530 m_freem(pData, mr);
1531 m_freem(pData, m);
1532 break;
1533 }
1534 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]);
1535 }
1536 break;
1537
1538 case ARPOP_REPLY:
1539 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]) == 0)
1540 {
1541 m_freem(pData, m);
1542 break;
1543 }
1544 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_sip, ah->ar_sha);
1545 m_freem(pData, m);
1546 break;
1547
1548 default:
1549 break;
1550 }
1551}
1552
1553/**
1554 * Feed a packet into the slirp engine.
1555 *
1556 * @param m Data buffer, m_len is not valid.
1557 * @param cbBuf The length of the data in m.
1558 */
1559void slirp_input(PNATState pData, struct mbuf *m, size_t cbBuf)
1560{
1561 int proto;
1562 static bool fWarnedIpv6;
1563 struct ethhdr *eh;
1564 uint8_t au8Ether[ETH_ALEN];
1565
1566 m->m_len = cbBuf;
1567 if (cbBuf < ETH_HLEN)
1568 {
1569 LogRel(("NAT: packet having size %d has been ignored\n", m->m_len));
1570 m_freem(pData, m);
1571 return;
1572 }
1573 eh = mtod(m, struct ethhdr *);
1574 proto = RT_N2H_U16(eh->h_proto);
1575
1576 memcpy(au8Ether, eh->h_source, ETH_ALEN);
1577
1578 switch(proto)
1579 {
1580 case ETH_P_ARP:
1581 arp_input(pData, m);
1582 break;
1583
1584 case ETH_P_IP:
1585 /* Update time. Important if the network is very quiet, as otherwise
1586 * the first outgoing connection gets an incorrect timestamp. */
1587 updtime(pData);
1588 m_adj(m, ETH_HLEN);
1589#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1590 M_ASSERTPKTHDR(m);
1591 m->m_pkthdr.header = mtod(m, void *);
1592#else /* !VBOX_WITH_SLIRP_BSD_MBUF */
1593 if ( pData->fmbuf_water_line
1594 && pData->fmbuf_water_warn_sent == 0
1595 && (curtime - pData->tsmbuf_water_warn_sent) > 500)
1596 {
1597 icmp_error(pData, m, ICMP_SOURCEQUENCH, 0, 0, "Out of resources!!!");
1598 pData->fmbuf_water_warn_sent = 1;
1599 pData->tsmbuf_water_warn_sent = curtime;
1600 }
1601#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
1602 ip_input(pData, m);
1603 break;
1604
1605 case ETH_P_IPV6:
1606 m_freem(pData, m);
1607 if (!fWarnedIpv6)
1608 {
1609 LogRel(("NAT: IPv6 not supported\n"));
1610 fWarnedIpv6 = true;
1611 }
1612 break;
1613
1614 default:
1615 Log(("NAT: Unsupported protocol %x\n", proto));
1616 m_freem(pData, m);
1617 break;
1618 }
1619
1620 if (pData->cRedirectionsActive != pData->cRedirectionsStored)
1621 activate_port_forwarding(pData, au8Ether);
1622}
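
/*
 * Dispatch summary for slirp_input() above: ARP frames go to arp_input(),
 * IPv4 frames are stripped of their Ethernet header and handed to ip_input(),
 * IPv6 is dropped with a one-time warning, and anything else is freed.  The
 * trailing activate_port_forwarding() call retries any port-forwarding rules
 * that are stored but not yet activated for the sending MAC address.
 */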
1623
1624/* output the IP packet to the ethernet device */
1625void if_encap(PNATState pData, uint16_t eth_proto, struct mbuf *m, int flags)
1626{
1627 struct ethhdr *eh;
1628 uint8_t *buf = NULL;
1629 size_t mlen = 0;
1630 STAM_PROFILE_START(&pData->StatIF_encap, a);
1631
1632#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1633 m->m_data -= if_maxlinkhdr;
1634 m->m_len += ETH_HLEN;
1635 eh = mtod(m, struct ethhdr *);
1636
1637 if (MBUF_HEAD(m) != m->m_data)
1638 {
1639 LogRel(("NAT: ethernet detects corruption of the packet"));
1640 AssertMsgFailed(("!!Ethernet frame corrupted!!"));
1641 }
1642#else
1643 M_ASSERTPKTHDR(m);
1644 m->m_data -= ETH_HLEN;
1645 m->m_len += ETH_HLEN;
1646 eh = mtod(m, struct ethhdr *);
1647#endif
1648
1649 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) != 0)
1650 {
1651 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
1652 memcpy(eh->h_source, special_ethaddr, ETH_ALEN);
1653 Assert(memcmp(eh->h_dest, special_ethaddr, ETH_ALEN) != 0);
1654 if (memcmp(eh->h_dest, zerro_ethaddr, ETH_ALEN) == 0)
1655 {
1656 /* don't do anything */
1657 m_freem(pData, m);
1658 goto done;
1659 }
1660 }
1661#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1662 mlen = m->m_len;
1663#else
1664 mlen = m_length(m, NULL);
1665 buf = RTMemAlloc(mlen);
1666 if (buf == NULL)
1667 {
1668 LogRel(("NAT: Can't alloc memory for outgoing buffer\n"));
1669 m_freem(pData, m);
1670 goto done;
1671 }
1672#endif
1673 eh->h_proto = RT_H2N_U16(eth_proto);
1674#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1675 m_copydata(m, 0, mlen, (char *)buf);
1676 if (flags & ETH_ENCAP_URG)
1677 slirp_urg_output(pData->pvUser, m, buf, mlen);
1678 else
1679 slirp_output(pData->pvUser, m, buf, mlen);
1680#else
1681 if (flags & ETH_ENCAP_URG)
1682 slirp_urg_output(pData->pvUser, m, mtod(m, const uint8_t *), mlen);
1683 else
1684 slirp_output(pData->pvUser, m, mtod(m, const uint8_t *), mlen);
1685#endif
1686done:
1687 STAM_PROFILE_STOP(&pData->StatIF_encap, a);
1688}
1689
1690/**
1691 * We still use the DHCP server leases to map an Ethernet address to an IP.
1692 * @todo see rt_lookup_in_cache
1693 */
1694static uint32_t find_guest_ip(PNATState pData, const uint8_t *eth_addr)
1695{
1696 uint32_t ip = INADDR_ANY;
1697 int rc;
1698
1699 if (eth_addr == NULL)
1700 return INADDR_ANY;
1701
1702 if ( memcmp(eth_addr, zerro_ethaddr, ETH_ALEN) == 0
1703 || memcmp(eth_addr, broadcast_ethaddr, ETH_ALEN) == 0)
1704 return INADDR_ANY;
1705
1706 rc = slirp_arp_lookup_ip_by_ether(pData, eth_addr, &ip);
1707 if (RT_SUCCESS(rc))
1708 return ip;
1709
1710 bootp_cache_lookup_ip_by_ether(pData, eth_addr, &ip);
1711 /* ignore return code, ip will be set to INADDR_ANY on error */
1712 return ip;
1713}
1714
1715/**
1716 * Checks whether we need to activate port forwarding for a specific
1717 * machine; that of course mainly matters in service mode.
1718 * @todo finish this for the service case
1720 */
1721static void activate_port_forwarding(PNATState pData, const uint8_t *h_source)
1722{
1723 struct port_forward_rule *rule;
1724
1725 /* check mac here */
1726 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
1727 {
1728 struct socket *so;
1729 struct alias_link *alias_link;
1730 struct libalias *lib;
1731 int flags;
1732 struct sockaddr sa;
1733 struct sockaddr_in *psin;
1734 socklen_t socketlen;
1735 struct in_addr alias;
1736 int rc;
1737 uint32_t guest_addr; /* need to understand if we already give address to guest */
1738
1739 if (rule->activated)
1740 continue;
1741
1742#ifdef VBOX_WITH_NAT_SERVICE
1743 if (memcmp(rule->mac_address, h_source, ETH_ALEN) != 0)
1744 continue; /* not the right MAC; @todo it'd be better to keep the port-forwarding list per MAC */
1745 guest_addr = find_guest_ip(pData, h_source);
1746#else
1747#if 0
1748 if (memcmp(client_ethaddr, h_source, ETH_ALEN) != 0)
1749 continue;
1750#endif
1751 guest_addr = find_guest_ip(pData, h_source);
1752#endif
1753 if (guest_addr == INADDR_ANY)
1754 {
1755 /* the address wasn't granted */
1756 return;
1757 }
1758
1759#if !defined(VBOX_WITH_NAT_SERVICE)
1760 if (rule->guest_addr.s_addr != guest_addr)
1761 continue;
1762#endif
1763
1764 LogRel(("NAT: set redirect %s host port %d => guest port %d @ %R[IP4]\n",
1765 (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1766 rule->host_port, rule->guest_port, &guest_addr));
1767
1768 if (rule->proto == IPPROTO_UDP)
1769 so = udp_listen(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1770 RT_H2N_U16(rule->guest_port), 0);
1771 else
1772 so = solisten(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1773 RT_H2N_U16(rule->guest_port), 0);
1774
1775 if (so == NULL)
1776 goto remove_port_forwarding;
1777
1778 psin = (struct sockaddr_in *)&sa;
1779 psin->sin_family = AF_INET;
1780 psin->sin_port = 0;
1781 psin->sin_addr.s_addr = INADDR_ANY;
1782 socketlen = sizeof(struct sockaddr);
1783
1784 rc = getsockname(so->s, &sa, &socketlen);
1785 if (rc < 0 || sa.sa_family != AF_INET)
1786 goto remove_port_forwarding;
1787
1788 psin = (struct sockaddr_in *)&sa;
1789
1790 lib = LibAliasInit(pData, NULL);
1791 flags = LibAliasSetMode(lib, 0, 0);
1792 flags |= pData->i32AliasMode;
1793 flags |= PKT_ALIAS_REVERSE; /* set reverse */
1794 flags = LibAliasSetMode(lib, flags, ~0);
1795
1796 alias.s_addr = RT_H2N_U32(RT_N2H_U32(guest_addr) | CTL_ALIAS);
1797 alias_link = LibAliasRedirectPort(lib, psin->sin_addr, RT_H2N_U16(rule->host_port),
1798 alias, RT_H2N_U16(rule->guest_port),
1799 pData->special_addr, -1, /* not very clear for now */
1800 rule->proto);
1801 if (!alias_link)
1802 goto remove_port_forwarding;
1803
1804 so->so_la = lib;
1805 rule->activated = 1;
1806 pData->cRedirectionsActive++;
1807 continue;
1808
1809 remove_port_forwarding:
1810 LogRel(("NAT: failed to redirect %s %d => %d\n",
1811 (rule->proto == IPPROTO_UDP?"UDP":"TCP"), rule->host_port, rule->guest_port));
1812 LIST_REMOVE(rule, list);
1813 pData->cRedirectionsStored--;
1814 RTMemFree(rule);
1815 }
1816}
1817
1818/**
1819 * Since 3.1, instead of opening a new socket right away, this merely records
1820 * the rule information:
1821 * 1. bind IP
1822 * 2. host port
1823 * 3. guest port
1824 * 4. protocol
1825 * 5. guest MAC address
1826 * The guest's MAC address matters mostly for the service case, but we can
1827 * easily get it from the VM configuration in DrvNAT or the service; the idea
1828 * is to activate the corresponding port-forwarding rule later.
1829 */
1830int slirp_redir(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1831 struct in_addr guest_addr, int guest_port, const uint8_t *ethaddr)
1832{
1833 struct port_forward_rule *rule = NULL;
1834 Assert(memcmp(ethaddr, zerro_ethaddr, ETH_ALEN) == 0);
1835
1836 rule = RTMemAllocZ(sizeof(struct port_forward_rule));
1837 if (rule == NULL)
1838 return 1;
1839
1840 rule->proto = (is_udp ? IPPROTO_UDP : IPPROTO_TCP);
1841 rule->host_port = host_port;
1842 rule->guest_port = guest_port;
1843#ifndef VBOX_WITH_NAT_SERVICE
1844 rule->guest_addr.s_addr = guest_addr.s_addr;
1845#endif
1846 rule->bind_ip.s_addr = host_addr.s_addr;
1847 memcpy(rule->mac_address, ethaddr, ETH_ALEN);
1848 /* @todo add mac address */
1849 LIST_INSERT_HEAD(&pData->port_forward_rule_head, rule, list);
1850 pData->cRedirectionsStored++;
1851 return 0;
1852}
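
/*
 * Usage sketch (illustrative values only, bindAddr/guestAddr/ethaddr being
 * caller-provided): to forward host TCP port 2222 to guest port 22 one would
 * call
 *     slirp_redir(pData, 0, bindAddr, 2222, guestAddr, 22, ethaddr);
 * with is_udp = 0 selecting TCP (the Assert above expects the all-zero MAC at
 * registration time).  This merely stores the rule; the listening socket and
 * the libalias redirect are created later by activate_port_forwarding() once
 * the guest's IP address is known.
 */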
1853
1854void slirp_set_ethaddr_and_activate_port_forwarding(PNATState pData, const uint8_t *ethaddr, uint32_t GuestIP)
1855{
1856#ifndef VBOX_WITH_NAT_SERVICE
1857 memcpy(client_ethaddr, ethaddr, ETH_ALEN);
1858#endif
1859 if (GuestIP != INADDR_ANY)
1860 {
1861 slirp_arp_cache_update_or_add(pData, GuestIP, ethaddr);
1862 activate_port_forwarding(pData, ethaddr);
1863 }
1864}
1865
1866#if defined(RT_OS_WINDOWS)
1867HANDLE *slirp_get_events(PNATState pData)
1868{
1869 return pData->phEvents;
1870}
1871void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index)
1872{
1873 pData->phEvents[index] = hEvent;
1874}
1875#endif
1876
1877unsigned int slirp_get_timeout_ms(PNATState pData)
1878{
1879 if (link_up)
1880 {
1881 if (time_fasttimo)
1882 return 2;
1883 if (do_slowtimo)
1884 return 500; /* see PR_SLOWHZ */
1885 }
1886 return 3600*1000; /* one hour */
1887}
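
/*
 * The values above mirror the timer handling in slirp_select_poll():
 * time_fasttimo asks for a ~2 ms wait before tcp_fasttimo() runs, do_slowtimo
 * for ~500 ms before ip_slowtimo()/tcp_slowtimo(), and with nothing pending
 * (or the link down) the caller may sleep for up to an hour.
 */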
1888
1889#ifndef RT_OS_WINDOWS
1890int slirp_get_nsock(PNATState pData)
1891{
1892 return pData->nsock;
1893}
1894#endif
1895
1896/*
1897 * This function is called from the NAT thread.
1898 */
1899void slirp_post_sent(PNATState pData, void *pvArg)
1900{
1901 struct socket *so = 0;
1902 struct tcpcb *tp = 0;
1903 struct mbuf *m = (struct mbuf *)pvArg;
1904 m_freem(pData, m);
1905}
1906#ifdef VBOX_WITH_SLIRP_MT
1907void slirp_process_queue(PNATState pData)
1908{
1909 RTReqProcess(pData->pReqQueue, RT_INDEFINITE_WAIT);
1910}
1911void *slirp_get_queue(PNATState pData)
1912{
1913 return pData->pReqQueue;
1914}
1915#endif
1916
1917void slirp_set_dhcp_TFTP_prefix(PNATState pData, const char *tftpPrefix)
1918{
1919 Log2(("tftp_prefix:%s\n", tftpPrefix));
1920 tftp_prefix = tftpPrefix;
1921}
1922
1923void slirp_set_dhcp_TFTP_bootfile(PNATState pData, const char *bootFile)
1924{
1925 Log2(("bootFile:%s\n", bootFile));
1926 bootp_filename = bootFile;
1927}
1928
1929void slirp_set_dhcp_next_server(PNATState pData, const char *next_server)
1930{
1931 Log2(("next_server:%s\n", next_server));
1932 if (next_server == NULL)
1933 pData->tftp_server.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_TFTP);
1934 else
1935 inet_aton(next_server, &pData->tftp_server);
1936}
1937
1938int slirp_set_binding_address(PNATState pData, char *addr)
1939{
1940 if (addr == NULL || (inet_aton(addr, &pData->bindIP) == 0))
1941 {
1942 pData->bindIP.s_addr = INADDR_ANY;
1943 return 1;
1944 }
1945 return 0;
1946}
1947
1948void slirp_set_dhcp_dns_proxy(PNATState pData, bool fDNSProxy)
1949{
1950 if (!pData->fUseHostResolver)
1951 {
1952 Log2(("NAT: DNS proxy switched %s\n", (fDNSProxy ? "on" : "off")));
1953 pData->fUseDnsProxy = fDNSProxy;
1954 }
1955 else
1956 LogRel(("NAT: Host Resolver conflicts with DNS proxy, the last one was forcely ignored\n"));
1957}
1958
1959#define CHECK_ARG(name, val, lim_min, lim_max) \
1960 do { \
1961 if ((val) < (lim_min) || (val) > (lim_max)) \
1962 { \
1963 LogRel(("NAT: (" #name ":%d) has been ignored, " \
1964 "because out of range (%d, %d)\n", (val), (lim_min), (lim_max))); \
1965 return; \
1966 } \
1967 else \
1968 LogRel(("NAT: (" #name ":%d)\n", (val))); \
1969 } while (0)
1970
1971/* Don't allow the user to set values below 8 KiB or above 1 MiB. */
1972#define _8K_1M_CHECK_ARG(name, val) CHECK_ARG(name, (val), 8, 1024)
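
/*
 * Example (hypothetical call): slirp_set_tcp_rcvspace(pData, 4) below logs the
 * "has been ignored" message and returns without touching tcp_rcvspace,
 * because 4 is under the 8 KiB lower bound enforced by _8K_1M_CHECK_ARG,
 * while slirp_set_tcp_rcvspace(pData, 64) sets tcp_rcvspace to 64 * _1K.
 */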
1973void slirp_set_rcvbuf(PNATState pData, int kilobytes)
1974{
1975 _8K_1M_CHECK_ARG("SOCKET_RCVBUF", kilobytes);
1976 pData->socket_rcv = kilobytes;
1977}
1978void slirp_set_sndbuf(PNATState pData, int kilobytes)
1979{
1980 _8K_1M_CHECK_ARG("SOCKET_SNDBUF", kilobytes);
1981 pData->socket_snd = kilobytes * _1K;
1982}
1983void slirp_set_tcp_rcvspace(PNATState pData, int kilobytes)
1984{
1985 _8K_1M_CHECK_ARG("TCP_RCVSPACE", kilobytes);
1986 tcp_rcvspace = kilobytes * _1K;
1987}
1988void slirp_set_tcp_sndspace(PNATState pData, int kilobytes)
1989{
1990 _8K_1M_CHECK_ARG("TCP_SNDSPACE", kilobytes);
1991 tcp_sndspace = kilobytes * _1K;
1992}
1993
1994/*
1995 * Look up the Ethernet address for an IP in the ARP cache.
1996 * Note: it is the caller's responsibility to allocate the buffer for the result.
1997 * @returns iprt status code
1998 */
1999int slirp_arp_lookup_ether_by_ip(PNATState pData, uint32_t ip, uint8_t *ether)
2000{
2001 struct arp_cache_entry *ac;
2002
2003 if (ether == NULL)
2004 return VERR_INVALID_PARAMETER;
2005
2006 if (LIST_EMPTY(&pData->arp_cache))
2007 return VERR_NOT_FOUND;
2008
2009 LIST_FOREACH(ac, &pData->arp_cache, list)
2010 {
2011 if (ac->ip == ip)
2012 {
2013 memcpy(ether, ac->ether, ETH_ALEN);
2014 return VINF_SUCCESS;
2015 }
2016 }
2017 return VERR_NOT_FOUND;
2018}
2019
2020/*
2021 * Look up the IP for an Ethernet address in the ARP cache.
2022 * Note: it is the caller's responsibility to allocate the buffer for the result.
2023 * @returns VINF_SUCCESS if found, VERR_NOT_FOUND otherwise.
2024 */
2025int slirp_arp_lookup_ip_by_ether(PNATState pData, const uint8_t *ether, uint32_t *ip)
2026{
2027 struct arp_cache_entry *ac;
2028 *ip = INADDR_ANY;
2029
2030 if (LIST_EMPTY(&pData->arp_cache))
2031 return VERR_NOT_FOUND;
2032
2033 LIST_FOREACH(ac, &pData->arp_cache, list)
2034 {
2035 if (memcmp(ether, ac->ether, ETH_ALEN) == 0)
2036 {
2037 *ip = ac->ip;
2038 return VINF_SUCCESS;
2039 }
2040 }
2041 return VERR_NOT_FOUND;
2042}
2043
2044void slirp_arp_who_has(PNATState pData, uint32_t dst)
2045{
2046 struct mbuf *m;
2047 struct ethhdr *ehdr;
2048 struct arphdr *ahdr;
2049
2050#ifndef VBOX_WITH_SLIRP_BSD_MBUF
2051 m = m_get(pData);
2052#else
2053 m = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
2054#endif
2055 if (m == NULL)
2056 {
2057 LogRel(("NAT: Can't alloc mbuf for ARP request\n"));
2058 return;
2059 }
2060 ehdr = mtod(m, struct ethhdr *);
2061 memset(ehdr->h_source, 0xff, ETH_ALEN);
2062 ahdr = (struct arphdr *)&ehdr[1];
2063 ahdr->ar_hrd = RT_H2N_U16_C(1);
2064 ahdr->ar_pro = RT_H2N_U16_C(ETH_P_IP);
2065 ahdr->ar_hln = ETH_ALEN;
2066 ahdr->ar_pln = 4;
2067 ahdr->ar_op = RT_H2N_U16_C(ARPOP_REQUEST);
2068 memcpy(ahdr->ar_sha, special_ethaddr, ETH_ALEN);
2069 *(uint32_t *)ahdr->ar_sip = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
2070 memset(ahdr->ar_tha, 0xff, ETH_ALEN); /*broadcast*/
2071 *(uint32_t *)ahdr->ar_tip = dst;
2072#ifndef VBOX_WITH_SLIRP_BSD_MBUF
2073 m->m_data += if_maxlinkhdr;
2074 m->m_len = sizeof(struct arphdr);
2075#else
2076 /* warning: this must fit within the minimal mbuf size */
2077 m->m_len = sizeof(struct arphdr) + ETH_HLEN;
2078 m->m_data += ETH_HLEN;
2079 m->m_len -= ETH_HLEN;
2080#endif
2081 if_encap(pData, ETH_P_ARP, m, ETH_ENCAP_URG);
2082}
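
/*
 * The request built above is an Ethernet-broadcast ARP who-has: the sender
 * hardware/protocol addresses are the NAT's own special_ethaddr and alias IP
 * (CTL_ALIAS), the target hardware address is all-ones, and the target IP is
 * the address being asked about; if_encap() then prepends the Ethernet header
 * and sends it via the urgent output path (ETH_ENCAP_URG).
 */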
2083
2084int slirp_arp_cache_update_or_add(PNATState pData, uint32_t dst, const uint8_t *mac)
2085{
2086 if (slirp_arp_cache_update(pData, dst, mac))
2087 slirp_arp_cache_add(pData, dst, mac);
2088
2089 return 0;
2090}
2091
2092/* Updates the ARP cache.
2093 * @returns 0 if the entry was found and updated,
2094 * 1 if it wasn't found.
2095 */
2096int slirp_arp_cache_update(PNATState pData, uint32_t dst, const uint8_t *mac)
2097{
2098 struct arp_cache_entry *ac;
2099 LIST_FOREACH(ac, &pData->arp_cache, list)
2100 {
2101 if (memcmp(ac->ether, mac, ETH_ALEN) == 0)
2102 {
2103 ac->ip = dst;
2104 return 0;
2105 }
2106 }
2107 return 1;
2108}
2109
2110void slirp_arp_cache_add(PNATState pData, uint32_t ip, const uint8_t *ether)
2111{
2112 struct arp_cache_entry *ac = NULL;
2113 ac = RTMemAllocZ(sizeof(struct arp_cache_entry));
2114 if (ac == NULL)
2115 {
2116 LogRel(("NAT: Can't allocate arp cache entry\n"));
2117 return;
2118 }
2119 ac->ip = ip;
2120 memcpy(ac->ether, ether, ETH_ALEN);
2121 LIST_INSERT_HEAD(&pData->arp_cache, ac, list);
2122}
2123
2124#ifdef VBOX_WITH_SLIRP_BSD_MBUF
2125void slirp_set_mtu(PNATState pData, int mtu)
2126{
2127 if (mtu < 20 || mtu >= 16000)
2128 {
2129 LogRel(("NAT: mtu(%d) is out of range (20;16000] mtu forcely assigned to 1500\n", mtu));
2130 mtu = 1500;
2131 }
2132 if_mtu =
2133 if_mru = mtu;
2134}
2135#endif