VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/slirp.c@ 36352

Last change on this file since 36352 was 36352, checked in by vboxsync, 14 years ago

NAT: for gratuitous ARP processing, the pair (ar_sip, ar_sha) is used.
NAT now reports which pair is used for addressing guests with statically assigned addresses.
It also prevents adding the broadcast Ethernet address as an element of the pair.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.5 KB
1/* $Id: slirp.c 36352 2011-03-23 04:58:16Z vboxsync $ */
2/** @file
3 * NAT - slirp glue.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * libslirp glue
22 *
23 * Copyright (c) 2004-2008 Fabrice Bellard
24 *
25 * Permission is hereby granted, free of charge, to any person obtaining a copy
26 * of this software and associated documentation files (the "Software"), to deal
27 * in the Software without restriction, including without limitation the rights
28 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
29 * copies of the Software, and to permit persons to whom the Software is
30 * furnished to do so, subject to the following conditions:
31 *
32 * The above copyright notice and this permission notice shall be included in
33 * all copies or substantial portions of the Software.
34 *
35 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
38 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
39 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
40 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
41 * THE SOFTWARE.
42 */
43
44#include "slirp.h"
45#ifdef RT_OS_OS2
46# include <paths.h>
47#endif
48
49#include <VBox/err.h>
50#include <VBox/vmm/pdmdrv.h>
51#include <iprt/assert.h>
52#include <iprt/file.h>
53#ifndef RT_OS_WINDOWS
54# include <sys/ioctl.h>
55# include <poll.h>
56#else
57# include <Winnls.h>
58# define _WINSOCK2API_
59# include <IPHlpApi.h>
60#endif
61#include <alias.h>
62
63#ifndef RT_OS_WINDOWS
64
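/*
 * DO_ENGAGE_EVENT1/2 register a socket with the pollfd array: if the socket
 * already owns a slot (so_poll_index), the requested events are simply OR-ed
 * into it; otherwise a new pollfd entry is claimed, filled in and its index
 * recorded in so_poll_index.
 */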
65# define DO_ENGAGE_EVENT1(so, fdset, label) \
66 do { \
67 if ( so->so_poll_index != -1 \
68 && so->s == polls[so->so_poll_index].fd) \
69 { \
70 polls[so->so_poll_index].events |= N_(fdset ## _poll); \
71 break; \
72 } \
73 AssertRelease(poll_index < (nfds)); \
74 AssertRelease(poll_index >= 0 && poll_index < (nfds)); \
75 polls[poll_index].fd = (so)->s; \
76 (so)->so_poll_index = poll_index; \
77 polls[poll_index].events = N_(fdset ## _poll); \
78 polls[poll_index].revents = 0; \
79 poll_index++; \
80 } while (0)
81
82# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
83 do { \
84 if ( so->so_poll_index != -1 \
85 && so->s == polls[so->so_poll_index].fd) \
86 { \
87 polls[so->so_poll_index].events |= \
88 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
89 break; \
90 } \
91 AssertRelease(poll_index < (nfds)); \
92 polls[poll_index].fd = (so)->s; \
93 (so)->so_poll_index = poll_index; \
94 polls[poll_index].events = \
95 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
96 poll_index++; \
97 } while (0)
98
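/*
 * On Unix hosts the poll results are read directly from pollfd::revents by
 * DO_CHECK_FD_SET below, so DO_POLL_EVENTS has nothing to do here.
 */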
99# define DO_POLL_EVENTS(rc, error, so, events, label) do {} while (0)
100
101/*
102 * DO_CHECK_FD_SET is used when dumping events on a socket, including POLLNVAL.
103 * gcc warns about attempts to log POLLNVAL, so the construction in the last two
104 * lines catches POLLNVAL while logging and returns false on error during
105 * normal usage.
106 */
107# define DO_CHECK_FD_SET(so, events, fdset) \
108 ( ((so)->so_poll_index != -1) \
109 && ((so)->so_poll_index <= ndfs) \
110 && ((so)->s == polls[so->so_poll_index].fd) \
111 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)) \
112 && ( N_(fdset ## _poll) == POLLNVAL \
113 || !(polls[(so)->so_poll_index].revents & POLLNVAL)))
114
115 /* specific for Unix API */
116# define DO_UNIX_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
117 /* specific for Windows Winsock API */
118# define DO_WIN_CHECK_FD_SET(so, events, fdset) 0
119
120# ifndef RT_OS_LINUX
121# define readfds_poll (POLLRDNORM)
122# define writefds_poll (POLLWRNORM)
123# else
124# define readfds_poll (POLLIN)
125# define writefds_poll (POLLOUT)
126# endif
127# define xfds_poll (POLLPRI)
128# define closefds_poll (POLLHUP)
129# define rderr_poll (POLLERR)
130# define rdhup_poll (POLLHUP)
131# define nval_poll (POLLNVAL)
132
133# define ICMP_ENGAGE_EVENT(so, fdset) \
134 do { \
135 if (pData->icmp_socket.s != -1) \
136 DO_ENGAGE_EVENT1((so), fdset, ICMP); \
137 } while (0)
138
139#else /* RT_OS_WINDOWS */
140
141/*
142 * On Windows, we will be notified by IcmpSendEcho2() when the response arrives.
143 * So no call to WSAEventSelect necessary.
144 */
145# define ICMP_ENGAGE_EVENT(so, fdset) do {} while (0)
146
147/*
148 * On Windows we use FD_ALL_EVENTS to ensure that we don't miss any event.
149 */
150# define DO_ENGAGE_EVENT1(so, fdset1, label) \
151 do { \
152 rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS); \
153 if (rc == SOCKET_ERROR) \
154 { \
155 /* This should not happen */ \
156 error = WSAGetLastError(); \
157 LogRel(("WSAEventSelect (" #label ") error %d (so=%x, socket=%s, event=%x)\n", \
158 error, (so), (so)->s, VBOX_SOCKET_EVENT)); \
159 } \
160 } while (0); \
161 CONTINUE(label)
162
163# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
164 DO_ENGAGE_EVENT1((so), (fdset1), label)
165
166# define DO_POLL_EVENTS(rc, error, so, events, label) \
167 (rc) = WSAEnumNetworkEvents((so)->s, VBOX_SOCKET_EVENT, (events)); \
168 if ((rc) == SOCKET_ERROR) \
169 { \
170 (error) = WSAGetLastError(); \
171 LogRel(("WSAEnumNetworkEvents " #label " error %d\n", (error))); \
172 CONTINUE(label); \
173 }
174
175# define acceptds_win FD_ACCEPT
176# define acceptds_win_bit FD_ACCEPT_BIT
177# define readfds_win FD_READ
178# define readfds_win_bit FD_READ_BIT
179# define writefds_win FD_WRITE
180# define writefds_win_bit FD_WRITE_BIT
181# define xfds_win FD_OOB
182# define xfds_win_bit FD_OOB_BIT
183# define closefds_win FD_CLOSE
184# define closefds_win_bit FD_CLOSE_BIT
185
186# define closefds_win FD_CLOSE
187# define closefds_win_bit FD_CLOSE_BIT
188
189# define DO_CHECK_FD_SET(so, events, fdset) \
190 (((events).lNetworkEvents & fdset ## _win) && ((events).iErrorCode[fdset ## _win_bit] == 0))
191
192# define DO_WIN_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
193# define DO_UNIX_CHECK_FD_SET(so, events, fdset) 1 /*specific for Unix API */
194
195#endif /* RT_OS_WINDOWS */
196
197#define TCP_ENGAGE_EVENT1(so, fdset) \
198 DO_ENGAGE_EVENT1((so), fdset, tcp)
199
200#define TCP_ENGAGE_EVENT2(so, fdset1, fdset2) \
201 DO_ENGAGE_EVENT2((so), fdset1, fdset2, tcp)
202
203#define UDP_ENGAGE_EVENT(so, fdset) \
204 DO_ENGAGE_EVENT1((so), fdset, udp)
205
206#define POLL_TCP_EVENTS(rc, error, so, events) \
207 DO_POLL_EVENTS((rc), (error), (so), (events), tcp)
208
209#define POLL_UDP_EVENTS(rc, error, so, events) \
210 DO_POLL_EVENTS((rc), (error), (so), (events), udp)
211
212#define CHECK_FD_SET(so, events, set) \
213 (DO_CHECK_FD_SET((so), (events), set))
214
215#define WIN_CHECK_FD_SET(so, events, set) \
216 (DO_WIN_CHECK_FD_SET((so), (events), set))
217
218#define UNIX_CHECK_FD_SET(so, events, set) \
219 (DO_UNIX_CHECK_FD_SET(so, events, set))
220
221/*
222 * Logging macros
223 */
224#if VBOX_WITH_DEBUG_NAT_SOCKETS
225# if defined(RT_OS_WINDOWS)
226# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
227 do { \
228 LogRel((" " #proto " %R[natsock] %R[natwinnetevents]\n", (so), (winevent))); \
229 } while (0)
230# else /* !RT_OS_WINDOWS */
231# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
232 do { \
233 LogRel((" " #proto " %R[natsock] %s %s %s er: %s, %s, %s\n", (so), \
234 CHECK_FD_SET(so, ign ,r_fdset) ? "READ":"", \
235 CHECK_FD_SET(so, ign, w_fdset) ? "WRITE":"", \
236 CHECK_FD_SET(so, ign, x_fdset) ? "OOB":"", \
237 CHECK_FD_SET(so, ign, rderr) ? "RDERR":"", \
238 CHECK_FD_SET(so, ign, rdhup) ? "RDHUP":"", \
239 CHECK_FD_SET(so, ign, nval) ? "RDNVAL":"")); \
240 } while (0)
241# endif /* !RT_OS_WINDOWS */
242#else /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
243# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) do {} while (0)
244#endif /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
245
246#define LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
247 DO_LOG_NAT_SOCK((so), proto, (winevent), r_fdset, w_fdset, x_fdset)
248
249static void activate_port_forwarding(PNATState, const uint8_t *pEther);
250
251static const uint8_t special_ethaddr[6] =
252{
253 0x52, 0x54, 0x00, 0x12, 0x35, 0x00
254};
255
256static const uint8_t broadcast_ethaddr[6] =
257{
258 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
259};
260
261const uint8_t zerro_ethaddr[6] =
262{
263 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
264};
265
266#ifdef RT_OS_WINDOWS
267static int get_dns_addr_domain(PNATState pData, bool fVerbose,
268 struct in_addr *pdns_addr,
269 const char **ppszDomain)
270{
271 ULONG flags = GAA_FLAG_INCLUDE_PREFIX; /*GAA_FLAG_INCLUDE_ALL_INTERFACES;*/ /* all interfaces registered in NDIS */
272 PIP_ADAPTER_ADDRESSES pAdapterAddr = NULL;
273 PIP_ADAPTER_ADDRESSES pAddr = NULL;
274 PIP_ADAPTER_DNS_SERVER_ADDRESS pDnsAddr = NULL;
275 ULONG size;
276 int wlen = 0;
277 char *pszSuffix;
278 struct dns_domain_entry *pDomain = NULL;
279 ULONG ret = ERROR_SUCCESS;
280
281 /* @todo add SKIP flags to fetch only the required information */
282
283 /* determine size of buffer */
284 size = 0;
285 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
286 if (ret != ERROR_BUFFER_OVERFLOW)
287 {
288 Log(("NAT: error %lu occurred on capacity detection operation\n", ret));
289 return -1;
290 }
291 if (size == 0)
292 {
293 Log(("NAT: Win socket API returns non capacity\n"));
294 return -1;
295 }
296
297 pAdapterAddr = RTMemAllocZ(size);
298 if (!pAdapterAddr)
299 {
300 Log(("NAT: No memory available\n"));
301 return -1;
302 }
303 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
304 if (ret != ERROR_SUCCESS)
305 {
306 Log(("NAT: error %lu occurred on fetching adapters info\n", ret));
307 RTMemFree(pAdapterAddr);
308 return -1;
309 }
310
311 for (pAddr = pAdapterAddr; pAddr != NULL; pAddr = pAddr->Next)
312 {
313 int found;
314 if (pAddr->OperStatus != IfOperStatusUp)
315 continue;
316
317 for (pDnsAddr = pAddr->FirstDnsServerAddress; pDnsAddr != NULL; pDnsAddr = pDnsAddr->Next)
318 {
319 struct sockaddr *SockAddr = pDnsAddr->Address.lpSockaddr;
320 struct in_addr InAddr;
321 struct dns_entry *pDns;
322
323 if (SockAddr->sa_family != AF_INET)
324 continue;
325
326 InAddr = ((struct sockaddr_in *)SockAddr)->sin_addr;
327
328 /* add dns server to list */
329 pDns = RTMemAllocZ(sizeof(struct dns_entry));
330 if (!pDns)
331 {
332 Log(("NAT: Can't allocate buffer for DNS entry\n"));
333 RTMemFree(pAdapterAddr);
334 return VERR_NO_MEMORY;
335 }
336
337 Log(("NAT: adding %R[IP4] to DNS server list\n", &InAddr));
338 if ((InAddr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
339 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
340 else
341 pDns->de_addr.s_addr = InAddr.s_addr;
342
343 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
344
345 if (pAddr->DnsSuffix == NULL)
346 continue;
347
348 /* uniq */
349 RTUtf16ToUtf8(pAddr->DnsSuffix, &pszSuffix);
350 if (!pszSuffix || strlen(pszSuffix) == 0)
351 {
352 RTStrFree(pszSuffix);
353 continue;
354 }
355
356 found = 0;
357 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
358 {
359 if ( pDomain->dd_pszDomain != NULL
360 && strcmp(pDomain->dd_pszDomain, pszSuffix) == 0)
361 {
362 found = 1;
363 RTStrFree(pszSuffix);
364 break;
365 }
366 }
367 if (!found)
368 {
369 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
370 if (!pDomain)
371 {
372 Log(("NAT: not enough memory\n"));
373 RTStrFree(pszSuffix);
374 RTMemFree(pAdapterAddr);
375 return VERR_NO_MEMORY;
376 }
377 pDomain->dd_pszDomain = pszSuffix;
378 Log(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
379 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
380 }
381 }
382 }
383 RTMemFree(pAdapterAddr);
384 return 0;
385}
386
387#else /* !RT_OS_WINDOWS */
388
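/**
 * Reads a single line from the file into pvBuf, stopping at CR or LF and
 * NUL-terminating the result; *pcbRead receives the number of characters read.
 * Returns VERR_EOF when the end of the file is reached.
 */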
389static int RTFileGets(RTFILE File, void *pvBuf, size_t cbBufSize, size_t *pcbRead)
390{
391 size_t cbRead;
392 char bTest;
393 int rc = VERR_NO_MEMORY;
394 char *pu8Buf = (char *)pvBuf;
395 *pcbRead = 0;
396
397 while ( RT_SUCCESS(rc = RTFileRead(File, &bTest, 1, &cbRead))
398 && (pu8Buf - (char *)pvBuf) < cbBufSize)
399 {
400 if (cbRead == 0)
401 return VERR_EOF;
402
403 if (bTest == '\r' || bTest == '\n')
404 {
405 *pu8Buf = 0;
406 return VINF_SUCCESS;
407 }
408 *pu8Buf = bTest;
409 pu8Buf++;
410 (*pcbRead)++;
411 }
412 return rc;
413}
414
415static int get_dns_addr_domain(PNATState pData, bool fVerbose,
416 struct in_addr *pdns_addr,
417 const char **ppszDomain)
418{
419 char buff[512];
420 char buff2[256];
421 RTFILE f;
422 int cNameserversFound = 0;
423 bool fWarnTooManyDnsServers = false;
424 struct in_addr tmp_addr;
425 int rc;
426 size_t bytes;
427
428# ifdef RT_OS_OS2
429 /* Try various locations. */
430 char *etc = getenv("ETC");
431 if (etc)
432 {
433 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", etc);
434 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
435 }
436 if (RT_FAILURE(rc))
437 {
438 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", _PATH_ETC);
439 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
440 }
441 if (RT_FAILURE(rc))
442 {
443 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", _PATH_ETC);
444 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
445 }
446# else /* !RT_OS_OS2 */
447# ifndef DEBUG_vvl
448 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
449# else
450 char *home = getenv("HOME");
451 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", home);
452 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
453 if (RT_SUCCESS(rc))
454 {
455 Log(("NAT: DNS we're using %s\n", buff));
456 }
457 else
458 {
459 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
460 Log(("NAT: DNS we're using %s\n", buff));
461 }
462# endif
463# endif /* !RT_OS_OS2 */
464 if (RT_FAILURE(rc))
465 return -1;
466
467 if (ppszDomain)
468 *ppszDomain = NULL;
469
470 Log(("NAT: DNS Servers:\n"));
471 while ( RT_SUCCESS(rc = RTFileGets(f, buff, sizeof(buff), &bytes))
472 && rc != VERR_EOF)
473 {
474 struct dns_entry *pDns = NULL;
475 if ( cNameserversFound == 4
476 && !fWarnTooManyDnsServers
477 && sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1)
478 {
479 fWarnTooManyDnsServers = true;
480 LogRel(("NAT: too many nameservers registered.\n"));
481 }
482 if ( sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1
483 && cNameserversFound < 4) /* Unix doesn't accept more than 4 name servers*/
484 {
485 if (!inet_aton(buff2, &tmp_addr))
486 continue;
487
488 /* localhost mask */
489 pDns = RTMemAllocZ(sizeof (struct dns_entry));
490 if (!pDns)
491 {
492 Log(("can't alloc memory for DNS entry\n"));
493 return -1;
494 }
495
496 /* check */
497 pDns->de_addr.s_addr = tmp_addr.s_addr;
498 if ((pDns->de_addr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
499 {
500 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
501 }
502 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
503 cNameserversFound++;
504 }
505 if ((!strncmp(buff, "domain", 6) || !strncmp(buff, "search", 6)))
506 {
507 char *tok;
508 char *saveptr;
509 struct dns_domain_entry *pDomain = NULL;
510 int fFoundDomain = 0;
511 tok = strtok_r(&buff[6], " \t\n", &saveptr);
512 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
513 {
514 if ( tok != NULL
515 && strcmp(tok, pDomain->dd_pszDomain) == 0)
516 {
517 fFoundDomain = 1;
518 break;
519 }
520 }
521 if (tok != NULL && !fFoundDomain)
522 {
523 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
524 if (!pDomain)
525 {
526 Log(("NAT: not enought memory to add domain list\n"));
527 return VERR_NO_MEMORY;
528 }
529 pDomain->dd_pszDomain = RTStrDup(tok);
530 Log(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
531 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
532 }
533 }
534 }
535 RTFileClose(f);
536 if (!cNameserversFound)
537 return -1;
538 return 0;
539}
540
541#endif /* !RT_OS_WINDOWS */
542
543int slirp_init_dns_list(PNATState pData)
544{
545 TAILQ_INIT(&pData->pDnsList);
546 LIST_INIT(&pData->pDomainList);
547 return get_dns_addr_domain(pData, true, NULL, NULL);
548}
549
550void slirp_release_dns_list(PNATState pData)
551{
552 struct dns_entry *pDns = NULL;
553 struct dns_domain_entry *pDomain = NULL;
554
555 while (!TAILQ_EMPTY(&pData->pDnsList))
556 {
557 pDns = TAILQ_FIRST(&pData->pDnsList);
558 TAILQ_REMOVE(&pData->pDnsList, pDns, de_list);
559 RTMemFree(pDns);
560 }
561
562 while (!LIST_EMPTY(&pData->pDomainList))
563 {
564 pDomain = LIST_FIRST(&pData->pDomainList);
565 LIST_REMOVE(pDomain, dd_list);
566 if (pDomain->dd_pszDomain != NULL)
567 RTStrFree(pDomain->dd_pszDomain);
568 RTMemFree(pDomain);
569 }
570}
571
572int get_dns_addr(PNATState pData, struct in_addr *pdns_addr)
573{
574 return get_dns_addr_domain(pData, false, pdns_addr, NULL);
575}
576
577int slirp_init(PNATState *ppData, uint32_t u32NetAddr, uint32_t u32Netmask,
578 bool fPassDomain, bool fUseHostResolver, int i32AliasMode, void *pvUser)
579{
580 int fNATfailed = 0;
581 int rc;
582 PNATState pData = RTMemAllocZ(sizeof(NATState));
583 *ppData = pData;
584 if (!pData)
585 return VERR_NO_MEMORY;
586 if (u32Netmask & 0x1f)
587 /* CTL is x.x.x.15, bootp passes up to 16 IPs (15..31) */
588 return VERR_INVALID_PARAMETER;
589 pData->fPassDomain = !fUseHostResolver ? fPassDomain : false;
590 pData->fUseHostResolver = fUseHostResolver;
591 pData->pvUser = pvUser;
592 pData->netmask = u32Netmask;
593
594 /* sockets & TCP defaults */
595 pData->socket_rcv = 64 * _1K;
596 pData->socket_snd = 64 * _1K;
597 tcp_sndspace = 64 * _1K;
598 tcp_rcvspace = 64 * _1K;
599
600#ifdef RT_OS_WINDOWS
601 {
602 WSADATA Data;
603 WSAStartup(MAKEWORD(2, 0), &Data);
604 }
605 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
606#endif
607#ifdef VBOX_WITH_SLIRP_MT
608 QSOCKET_LOCK_CREATE(tcb);
609 QSOCKET_LOCK_CREATE(udb);
610 rc = RTReqCreateQueue(&pData->pReqQueue);
611 AssertReleaseRC(rc);
612#endif
613
614 link_up = 1;
615
616 rc = bootp_dhcp_init(pData);
617 if (rc != 0)
618 {
619 Log(("NAT: DHCP server initialization was failed\n"));
620 return VINF_NAT_DNS;
621 }
622 debug_init();
623 if_init(pData);
624 ip_init(pData);
625 icmp_init(pData);
626
627 /* Initialise mbufs *after* setting the MTU */
628 mbuf_init(pData);
629
630 pData->special_addr.s_addr = u32NetAddr;
631 pData->slirp_ethaddr = &special_ethaddr[0];
632 alias_addr.s_addr = pData->special_addr.s_addr | RT_H2N_U32_C(CTL_ALIAS);
633 /* @todo: add ability to configure this stuff */
634
635 /* set default addresses */
636 inet_aton("127.0.0.1", &loopback_addr);
637 if (!pData->fUseHostResolver)
638 {
639 if (slirp_init_dns_list(pData) < 0)
640 fNATfailed = 1;
641
642 dnsproxy_init(pData);
643 }
644 if (i32AliasMode & ~(PKT_ALIAS_LOG|PKT_ALIAS_SAME_PORTS|PKT_ALIAS_PROXY_ONLY))
645 {
646 Log(("NAT: alias mode %x is ignored\n", i32AliasMode));
647 i32AliasMode = 0;
648 }
649 pData->i32AliasMode = i32AliasMode;
650 getouraddr(pData);
651 {
652 int flags = 0;
653 struct in_addr proxy_addr;
654 pData->proxy_alias = LibAliasInit(pData, NULL);
655 if (pData->proxy_alias == NULL)
656 {
657 Log(("NAT: LibAlias default rule wasn't initialized\n"));
658 AssertMsgFailed(("NAT: LibAlias default rule wasn't initialized\n"));
659 }
660 flags = LibAliasSetMode(pData->proxy_alias, 0, 0);
661#ifndef NO_FW_PUNCH
662 flags |= PKT_ALIAS_PUNCH_FW;
663#endif
664 flags |= pData->i32AliasMode; /* do transparent proxying */
665 flags = LibAliasSetMode(pData->proxy_alias, flags, ~0);
666 proxy_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
667 LibAliasSetAddress(pData->proxy_alias, proxy_addr);
668 ftp_alias_load(pData);
669 nbt_alias_load(pData);
670 if (pData->fUseHostResolver)
671 dns_alias_load(pData);
672 }
673 return fNATfailed ? VINF_NAT_DNS : VINF_SUCCESS;
674}
675
676/**
677 * Register statistics.
678 */
679void slirp_register_statistics(PNATState pData, PPDMDRVINS pDrvIns)
680{
681#ifdef VBOX_WITH_STATISTICS
682# define PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
683# define COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
684# include "counters.h"
685# undef COUNTER
686/** @todo register statistics for the variables dumped by:
687 * ipstats(pData); tcpstats(pData); udpstats(pData); icmpstats(pData);
688 * mbufstats(pData); sockstats(pData); */
689#endif /* VBOX_WITH_STATISTICS */
690}
691
692/**
693 * Deregister statistics.
694 */
695void slirp_deregister_statistics(PNATState pData, PPDMDRVINS pDrvIns)
696{
697 if (pData == NULL)
698 return;
699#ifdef VBOX_WITH_STATISTICS
700# define PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
701# define COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
702# include "counters.h"
703#endif /* VBOX_WITH_STATISTICS */
704}
705
706/**
707 * Marks the link as up, making it possible to establish new connections.
708 */
709void slirp_link_up(PNATState pData)
710{
711 struct arp_cache_entry *ac;
712 link_up = 1;
713
714 if (LIST_EMPTY(&pData->arp_cache))
715 return;
716
717 LIST_FOREACH(ac, &pData->arp_cache, list)
718 {
719 activate_port_forwarding(pData, ac->ether);
720 }
721}
722
723/**
724 * Marks the link as down and cleans up the current connections.
725 */
726void slirp_link_down(PNATState pData)
727{
728 struct socket *so;
729 struct port_forward_rule *rule;
730
731 while ((so = tcb.so_next) != &tcb)
732 {
733 if (so->so_state & SS_NOFDREF || so->s == -1)
734 sofree(pData, so);
735 else
736 tcp_drop(pData, sototcpcb(so), 0);
737 }
738
739 while ((so = udb.so_next) != &udb)
740 udp_detach(pData, so);
741
742 /*
743 * Clear the active state of port-forwarding rules to force
744 * re-setup on restoration of communications.
745 */
746 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
747 {
748 rule->activated = 0;
749 }
750 pData->cRedirectionsActive = 0;
751
752 link_up = 0;
753}
754
755/**
756 * Terminates the slirp component.
757 */
758void slirp_term(PNATState pData)
759{
760 if (pData == NULL)
761 return;
762#ifdef RT_OS_WINDOWS
763 pData->pfIcmpCloseHandle(pData->icmp_socket.sh);
764 FreeLibrary(pData->hmIcmpLibrary);
765 RTMemFree(pData->pvIcmpBuffer);
766#else
767 closesocket(pData->icmp_socket.s);
768#endif
769
770 slirp_link_down(pData);
771 slirp_release_dns_list(pData);
772 ftp_alias_unload(pData);
773 nbt_alias_unload(pData);
774 if (pData->fUseHostResolver)
775 dns_alias_unload(pData);
776 while (!LIST_EMPTY(&instancehead))
777 {
778 struct libalias *la = LIST_FIRST(&instancehead);
779 /* libalias do all clean up */
780 LibAliasUninit(la);
781 }
782 while (!LIST_EMPTY(&pData->arp_cache))
783 {
784 struct arp_cache_entry *ac = LIST_FIRST(&pData->arp_cache);
785 LIST_REMOVE(ac, list);
786 RTMemFree(ac);
787 }
788 bootp_dhcp_fini(pData);
789 m_fini(pData);
790#ifdef RT_OS_WINDOWS
791 WSACleanup();
792#endif
793#ifndef VBOX_WITH_SLIRP_BSD_SBUF
794#ifdef LOG_ENABLED
795 Log(("\n"
796 "NAT statistics\n"
797 "--------------\n"
798 "\n"));
799 ipstats(pData);
800 tcpstats(pData);
801 udpstats(pData);
802 icmpstats(pData);
803 mbufstats(pData);
804 sockstats(pData);
805 Log(("\n"
806 "\n"
807 "\n"));
808#endif
809#endif
810 RTMemFree(pData);
811}
812
813
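/* A connection may forward data in a given direction only when it is fully
 * connected and that direction has not been shut down. */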
814#define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
815#define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
816
817/*
818 * curtime kept to an accuracy of 1ms
819 */
820static void updtime(PNATState pData)
821{
822#ifdef RT_OS_WINDOWS
823 struct _timeb tb;
824
825 _ftime(&tb);
826 curtime = (u_int)tb.time * (u_int)1000;
827 curtime += (u_int)tb.millitm;
828#else
829 gettimeofday(&tt, 0);
830
831 curtime = (u_int)tt.tv_sec * (u_int)1000;
832 curtime += (u_int)tt.tv_usec / (u_int)1000;
833
834 if ((tt.tv_usec % 1000) >= 500)
835 curtime++;
836#endif
837}
838
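/**
 * Fills the host polling structures for all active NAT sockets (ICMP, TCP and
 * UDP).  On Unix hosts the pollfd array is populated and *pnfds returns the
 * number of entries used; on Windows the sockets are bound to VBOX_SOCKET_EVENT
 * and *pnfds returns the event count.
 */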
839#ifdef RT_OS_WINDOWS
840void slirp_select_fill(PNATState pData, int *pnfds)
841#else /* RT_OS_WINDOWS */
842void slirp_select_fill(PNATState pData, int *pnfds, struct pollfd *polls)
843#endif /* !RT_OS_WINDOWS */
844{
845 struct socket *so, *so_next;
846 int nfds;
847#if defined(RT_OS_WINDOWS)
848 int rc;
849 int error;
850#else
851 int poll_index = 0;
852#endif
853 int i;
854
855 STAM_PROFILE_START(&pData->StatFill, a);
856
857 nfds = *pnfds;
858
859 /*
860 * First, TCP sockets
861 */
862 do_slowtimo = 0;
863 if (!link_up)
864 goto done;
865
866 /*
867 * *_slowtimo needs calling if there are IP fragments
868 * in the fragment queue, or there are TCP connections active
869 */
870 /* XXX:
871 * triggering of fragment expiration should be the same but use the new macros
872 */
873 do_slowtimo = (tcb.so_next != &tcb);
874 if (!do_slowtimo)
875 {
876 for (i = 0; i < IPREASS_NHASH; i++)
877 {
878 if (!TAILQ_EMPTY(&ipq[i]))
879 {
880 do_slowtimo = 1;
881 break;
882 }
883 }
884 }
885 /* always add the ICMP socket */
886#ifndef RT_OS_WINDOWS
887 pData->icmp_socket.so_poll_index = -1;
888#endif
889 ICMP_ENGAGE_EVENT(&pData->icmp_socket, readfds);
890
891 STAM_COUNTER_RESET(&pData->StatTCP);
892 STAM_COUNTER_RESET(&pData->StatTCPHot);
893
894 QSOCKET_FOREACH(so, so_next, tcp)
895 /* { */
896#if !defined(RT_OS_WINDOWS)
897 so->so_poll_index = -1;
898#endif
899 STAM_COUNTER_INC(&pData->StatTCP);
900
901 /*
902 * See if we need a tcp_fasttimo
903 */
904 if ( time_fasttimo == 0
905 && so->so_tcpcb != NULL
906 && so->so_tcpcb->t_flags & TF_DELACK)
907 {
908 time_fasttimo = curtime; /* Flag when we want a fasttimo */
909 }
910
911 /*
912 * NOFDREF can include still connecting to local-host,
913 * newly socreated() sockets etc. Don't want to select these.
914 */
915 if (so->so_state & SS_NOFDREF || so->s == -1)
916 CONTINUE(tcp);
917
918 /*
919 * Set for reading sockets which are accepting
920 */
921 if (so->so_state & SS_FACCEPTCONN)
922 {
923 STAM_COUNTER_INC(&pData->StatTCPHot);
924 TCP_ENGAGE_EVENT1(so, readfds);
925 CONTINUE(tcp);
926 }
927
928 /*
929 * Set for writing sockets which are connecting
930 */
931 if (so->so_state & SS_ISFCONNECTING)
932 {
933 Log2(("connecting %R[natsock] engaged\n",so));
934 STAM_COUNTER_INC(&pData->StatTCPHot);
935 TCP_ENGAGE_EVENT1(so, writefds);
936 }
937
938 /*
939 * Set for writing if we are connected, can send more, and
940 * we have something to send
941 */
942 if (CONN_CANFSEND(so) && SBUF_LEN(&so->so_rcv))
943 {
944 STAM_COUNTER_INC(&pData->StatTCPHot);
945 TCP_ENGAGE_EVENT1(so, writefds);
946 }
947
948 /*
949 * Set for reading (and urgent data) if we are connected, can
950 * receive more, and we have room for it XXX /2 ?
951 */
952 /* @todo: vvl - check which predicate will be more useful here in terms of the new sbufs. */
953 if (CONN_CANFRCV(so) && (SBUF_LEN(&so->so_snd) < (SBUF_SIZE(&so->so_snd)/2)))
954 {
955 STAM_COUNTER_INC(&pData->StatTCPHot);
956 TCP_ENGAGE_EVENT2(so, readfds, xfds);
957 }
958 LOOP_LABEL(tcp, so, so_next);
959 }
960
961 /*
962 * UDP sockets
963 */
964 STAM_COUNTER_RESET(&pData->StatUDP);
965 STAM_COUNTER_RESET(&pData->StatUDPHot);
966
967 QSOCKET_FOREACH(so, so_next, udp)
968 /* { */
969
970 STAM_COUNTER_INC(&pData->StatUDP);
971#if !defined(RT_OS_WINDOWS)
972 so->so_poll_index = -1;
973#endif
974
975 /*
976 * See if it's timed out
977 */
978 if (so->so_expire)
979 {
980 if (so->so_expire <= curtime)
981 {
982 Log2(("NAT: %R[natsock] expired\n", so));
983 if (so->so_timeout != NULL)
984 {
985 so->so_timeout(pData, so, so->so_timeout_arg);
986 }
987#ifdef VBOX_WITH_SLIRP_MT
988 /* we need so_next to continue our loop */
989 so_next = so->so_next;
990#endif
991 UDP_DETACH(pData, so, so_next);
992 CONTINUE_NO_UNLOCK(udp);
993 }
994 }
995
996 /*
997 * When UDP packets are received from over the link, they're
998 * sendto()'d straight away, so no need for setting for writing
999 * Limit the number of packets queued by this session to 4.
1000 * Note that even though we try and limit this to 4 packets,
1001 * the session could have more queued if the packets needed
1002 * to be fragmented.
1003 *
1004 * (XXX <= 4 ?)
1005 */
1006 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4)
1007 {
1008 STAM_COUNTER_INC(&pData->StatUDPHot);
1009 UDP_ENGAGE_EVENT(so, readfds);
1010 }
1011 LOOP_LABEL(udp, so, so_next);
1012 }
1013done:
1014
1015#if defined(RT_OS_WINDOWS)
1016 *pnfds = VBOX_EVENT_COUNT;
1017#else /* RT_OS_WINDOWS */
1018 AssertRelease(poll_index <= *pnfds);
1019 *pnfds = poll_index;
1020#endif /* !RT_OS_WINDOWS */
1021
1022 STAM_PROFILE_STOP(&pData->StatFill, a);
1023}
1024
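/**
 * Processes the poll results: runs the TCP fast and slow timers, reads ICMP
 * replies, and services read/write/close events on every TCP and UDP socket.
 */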
1025#if defined(RT_OS_WINDOWS)
1026void slirp_select_poll(PNATState pData, int fTimeout, int fIcmp)
1027#else /* RT_OS_WINDOWS */
1028void slirp_select_poll(PNATState pData, struct pollfd *polls, int ndfs)
1029#endif /* !RT_OS_WINDOWS */
1030{
1031 struct socket *so, *so_next;
1032 int ret;
1033#if defined(RT_OS_WINDOWS)
1034 WSANETWORKEVENTS NetworkEvents;
1035 int rc;
1036 int error;
1037#else
1038 int poll_index = 0;
1039#endif
1040
1041 STAM_PROFILE_START(&pData->StatPoll, a);
1042
1043 /* Update time */
1044 updtime(pData);
1045
1046 /*
1047 * See if anything has timed out
1048 */
1049 if (link_up)
1050 {
1051 if (time_fasttimo && ((curtime - time_fasttimo) >= 2))
1052 {
1053 STAM_PROFILE_START(&pData->StatFastTimer, b);
1054 tcp_fasttimo(pData);
1055 time_fasttimo = 0;
1056 STAM_PROFILE_STOP(&pData->StatFastTimer, b);
1057 }
1058 if (do_slowtimo && ((curtime - last_slowtimo) >= 499))
1059 {
1060 STAM_PROFILE_START(&pData->StatSlowTimer, c);
1061 ip_slowtimo(pData);
1062 tcp_slowtimo(pData);
1063 last_slowtimo = curtime;
1064 STAM_PROFILE_STOP(&pData->StatSlowTimer, c);
1065 }
1066 }
1067#if defined(RT_OS_WINDOWS)
1068 if (fTimeout)
1069 return; /* only timer update */
1070#endif
1071
1072 /*
1073 * Check sockets
1074 */
1075 if (!link_up)
1076 goto done;
1077#if defined(RT_OS_WINDOWS)
1078 /* XXX: before renaming, see the fIcmp define
1079 * in slirp_state.h
1080 */
1081 if (fIcmp)
1082 sorecvfrom(pData, &pData->icmp_socket);
1083#else
1084 if ( (pData->icmp_socket.s != -1)
1085 && CHECK_FD_SET(&pData->icmp_socket, ignored, readfds))
1086 sorecvfrom(pData, &pData->icmp_socket);
1087#endif
1088 /*
1089 * Check TCP sockets
1090 */
1091 QSOCKET_FOREACH(so, so_next, tcp)
1092 /* { */
1093
1094#ifdef VBOX_WITH_SLIRP_MT
1095 if ( so->so_state & SS_NOFDREF
1096 && so->so_deleted == 1)
1097 {
1098 struct socket *son, *sop = NULL;
1099 QSOCKET_LOCK(tcb);
1100 if (so->so_next != NULL)
1101 {
1102 if (so->so_next != &tcb)
1103 SOCKET_LOCK(so->so_next);
1104 son = so->so_next;
1105 }
1106 if ( so->so_prev != &tcb
1107 && so->so_prev != NULL)
1108 {
1109 SOCKET_LOCK(so->so_prev);
1110 sop = so->so_prev;
1111 }
1112 QSOCKET_UNLOCK(tcb);
1113 remque(pData, so);
1114 NSOCK_DEC();
1115 SOCKET_UNLOCK(so);
1116 SOCKET_LOCK_DESTROY(so);
1117 RTMemFree(so);
1118 so_next = son;
1119 if (sop != NULL)
1120 SOCKET_UNLOCK(sop);
1121 CONTINUE_NO_UNLOCK(tcp);
1122 }
1123#endif
1124 /*
1125 * FD_ISSET is meaningless on these sockets
1126 * (and they can crash the program)
1127 */
1128 if (so->so_state & SS_NOFDREF || so->s == -1)
1129 CONTINUE(tcp);
1130
1131 POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);
1132
1133 LOG_NAT_SOCK(so, TCP, &NetworkEvents, readfds, writefds, xfds);
1134
1135
1136 /*
1137 * Check for URG data
1138 * This will soread as well, so no need to
1139 * test for readfds below if this succeeds
1140 */
1141
1142 /* out-of-band data */
1143 if ( CHECK_FD_SET(so, NetworkEvents, xfds)
1144#ifdef RT_OS_DARWIN
1145 /* Darwin (and probably other BSD hosts) generates a POLLPRI|POLLHUP event on receiving TCP.flags.{ACK|URG|FIN};
1146 * on other Unix hosts this combination doesn't reach this branch
1147 */
1148 && !CHECK_FD_SET(so, NetworkEvents, closefds)
1149#endif
1150 )
1151 {
1152 sorecvoob(pData, so);
1153 }
1154
1155 /*
1156 * Check sockets for reading
1157 */
1158 else if ( CHECK_FD_SET(so, NetworkEvents, readfds)
1159 || WIN_CHECK_FD_SET(so, NetworkEvents, acceptds))
1160 {
1161 /*
1162 * Check for incoming connections
1163 */
1164 if (so->so_state & SS_FACCEPTCONN)
1165 {
1166 TCP_CONNECT(pData, so);
1167 if (!CHECK_FD_SET(so, NetworkEvents, closefds))
1168 CONTINUE(tcp);
1169 }
1170
1171 ret = soread(pData, so);
1172 /* Output it if we read something */
1173 if (RT_LIKELY(ret > 0))
1174 TCP_OUTPUT(pData, sototcpcb(so));
1175 }
1176
1177 /*
1178 * Check for FD_CLOSE events.
1179 * In some cases, once FD_CLOSE has been signalled on a socket, the event may later be flushed (for various reasons).
1180 */
1181 if ( CHECK_FD_SET(so, NetworkEvents, closefds)
1182 || (so->so_close == 1))
1183 {
1184 /*
1185 * drain the socket
1186 */
1187 for (;;)
1188 {
1189 ret = soread(pData, so);
1190 if (ret > 0)
1191 TCP_OUTPUT(pData, sototcpcb(so));
1192 else
1193 {
1194 Log2(("%R[natsock] errno %d (%s)\n", so, errno, strerror(errno)));
1195 break;
1196 }
1197 }
1198 /* mark the socket for termination _after_ it was drained */
1199 so->so_close = 1;
1200 /* No idea about Windows but on Posix, POLLHUP means that we can't send more.
1201 * Actually in the specific error scenario, POLLERR is set as well. */
1202#ifndef RT_OS_WINDOWS
1203 if (CHECK_FD_SET(so, NetworkEvents, rderr))
1204 sofcantsendmore(so);
1205#endif
1206 CONTINUE(tcp);
1207 }
1208
1209 /*
1210 * Check sockets for writing
1211 */
1212 if (CHECK_FD_SET(so, NetworkEvents, writefds))
1213 {
1214 /*
1215 * Check for non-blocking, still-connecting sockets
1216 */
1217 if (so->so_state & SS_ISFCONNECTING)
1218 {
1219 Log2(("connecting %R[natsock] catched\n", so));
1220 /* Connected */
1221 so->so_state &= ~SS_ISFCONNECTING;
1222
1223 /*
1224 * This should be probably guarded by PROBE_CONN too. Anyway,
1225 * we disable it on OS/2 because the below send call returns
1226 * EFAULT which causes the opened TCP socket to close right
1227 * after it has been opened and connected.
1228 */
1229#ifndef RT_OS_OS2
1230 ret = send(so->s, (const char *)&ret, 0, 0);
1231 if (ret < 0)
1232 {
1233 /* XXXXX Must fix, zero bytes is a NOP */
1234 if ( errno == EAGAIN
1235 || errno == EWOULDBLOCK
1236 || errno == EINPROGRESS
1237 || errno == ENOTCONN)
1238 CONTINUE(tcp);
1239
1240 /* else failed */
1241 so->so_state = SS_NOFDREF;
1242 }
1243 /* else so->so_state &= ~SS_ISFCONNECTING; */
1244#endif
1245
1246 /*
1247 * Continue tcp_input
1248 */
1249 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1250 /* continue; */
1251 }
1252 else
1253 SOWRITE(ret, pData, so);
1254 /*
1255 * XXX If we wrote something (a lot), there could be the need
1256 * for a window update. In the worst case, the remote will send
1257 * a window probe to get things going again.
1258 */
1259 }
1260
1261 /*
1262 * Probe a still-connecting, non-blocking socket
1263 * to check if it's still alive
1264 */
1265#ifdef PROBE_CONN
1266 if (so->so_state & SS_ISFCONNECTING)
1267 {
1268 ret = recv(so->s, (char *)&ret, 0, 0);
1269
1270 if (ret < 0)
1271 {
1272 /* XXX */
1273 if ( errno == EAGAIN
1274 || errno == EWOULDBLOCK
1275 || errno == EINPROGRESS
1276 || errno == ENOTCONN)
1277 {
1278 CONTINUE(tcp); /* Still connecting, continue */
1279 }
1280
1281 /* else failed */
1282 so->so_state = SS_NOFDREF;
1283
1284 /* tcp_input will take care of it */
1285 }
1286 else
1287 {
1288 ret = send(so->s, &ret, 0, 0);
1289 if (ret < 0)
1290 {
1291 /* XXX */
1292 if ( errno == EAGAIN
1293 || errno == EWOULDBLOCK
1294 || errno == EINPROGRESS
1295 || errno == ENOTCONN)
1296 {
1297 CONTINUE(tcp);
1298 }
1299 /* else failed */
1300 so->so_state = SS_NOFDREF;
1301 }
1302 else
1303 so->so_state &= ~SS_ISFCONNECTING;
1304
1305 }
1306 TCP_INPUT((struct mbuf *)NULL, sizeof(struct ip),so);
1307 } /* SS_ISFCONNECTING */
1308#endif
1309 LOOP_LABEL(tcp, so, so_next);
1310 }
1311
1312 /*
1313 * Now UDP sockets.
1314 * Incoming packets are sent straight away, they're not buffered.
1315 * Incoming UDP data isn't buffered either.
1316 */
1317 QSOCKET_FOREACH(so, so_next, udp)
1318 /* { */
1319#ifdef VBOX_WITH_SLIRP_MT
1320 if ( so->so_state & SS_NOFDREF
1321 && so->so_deleted == 1)
1322 {
1323 struct socket *son, *sop = NULL;
1324 QSOCKET_LOCK(udb);
1325 if (so->so_next != NULL)
1326 {
1327 if (so->so_next != &udb)
1328 SOCKET_LOCK(so->so_next);
1329 son = so->so_next;
1330 }
1331 if ( so->so_prev != &udb
1332 && so->so_prev != NULL)
1333 {
1334 SOCKET_LOCK(so->so_prev);
1335 sop = so->so_prev;
1336 }
1337 QSOCKET_UNLOCK(udb);
1338 remque(pData, so);
1339 NSOCK_DEC();
1340 SOCKET_UNLOCK(so);
1341 SOCKET_LOCK_DESTROY(so);
1342 RTMemFree(so);
1343 so_next = son;
1344 if (sop != NULL)
1345 SOCKET_UNLOCK(sop);
1346 CONTINUE_NO_UNLOCK(udp);
1347 }
1348#endif
1349 POLL_UDP_EVENTS(rc, error, so, &NetworkEvents);
1350
1351 LOG_NAT_SOCK(so, UDP, &NetworkEvents, readfds, writefds, xfds);
1352
1353 if (so->s != -1 && CHECK_FD_SET(so, NetworkEvents, readfds))
1354 {
1355 SORECVFROM(pData, so);
1356 }
1357 LOOP_LABEL(udp, so, so_next);
1358 }
1359
1360done:
1361
1362 STAM_PROFILE_STOP(&pData->StatPoll, a);
1363}
1364
1365
1366struct arphdr
1367{
1368 unsigned short ar_hrd; /* format of hardware address */
1369 unsigned short ar_pro; /* format of protocol address */
1370 unsigned char ar_hln; /* length of hardware address */
1371 unsigned char ar_pln; /* length of protocol address */
1372 unsigned short ar_op; /* ARP opcode (command) */
1373
1374 /*
1375 * Ethernet looks like this : This bit is variable sized however...
1376 */
1377 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
1378 unsigned char ar_sip[4]; /* sender IP address */
1379 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
1380 unsigned char ar_tip[4]; /* target IP address */
1381};
1382AssertCompileSize(struct arphdr, 28);
1383
1384/**
1385 * @note This function will free m!
1386 */
1387static void arp_input(PNATState pData, struct mbuf *m)
1388{
1389 struct ethhdr *eh;
1390 struct ethhdr *reh;
1391 struct arphdr *ah;
1392 struct arphdr *rah;
1393 int ar_op;
1394 uint32_t htip;
1395 uint32_t tip;
1396 struct mbuf *mr;
1397 eh = mtod(m, struct ethhdr *);
1398 ah = (struct arphdr *)&eh[1];
1399 htip = RT_N2H_U32(*(uint32_t*)ah->ar_tip);
1400 tip = *(uint32_t*)ah->ar_tip;
1401
1402 ar_op = RT_N2H_U16(ah->ar_op);
1403
1404 switch (ar_op)
1405 {
1406 case ARPOP_REQUEST:
1407 mr = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1408 if (!mr)
1409 break;
1410 reh = mtod(mr, struct ethhdr *);
1411 mr->m_data += ETH_HLEN;
1412 rah = mtod(mr, struct arphdr *);
1413 mr->m_len = sizeof(struct arphdr);
1414 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1415 if ( 0
1416#ifdef VBOX_WITH_NAT_SERVICE
1417 || (tip == pData->special_addr.s_addr)
1418#endif
1419 || ( ((htip & pData->netmask) == RT_N2H_U32(pData->special_addr.s_addr))
1420 && ( CTL_CHECK(htip, CTL_DNS)
1421 || CTL_CHECK(htip, CTL_ALIAS)
1422 || CTL_CHECK(htip, CTL_TFTP))
1423 )
1424 )
1425 {
1426 rah->ar_hrd = RT_H2N_U16_C(1);
1427 rah->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1428 rah->ar_hln = ETH_ALEN;
1429 rah->ar_pln = 4;
1430 rah->ar_op = RT_H2N_U16_C(ARPOP_REPLY);
1431 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN);
1432
1433 switch (htip & ~pData->netmask)
1434 {
1435 case CTL_DNS:
1436 case CTL_ALIAS:
1437 rah->ar_sha[5] = (uint8_t)(htip & ~pData->netmask);
1438 break;
1439 default:;
1440 }
1441
1442 memcpy(rah->ar_sip, ah->ar_tip, 4);
1443 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
1444 memcpy(rah->ar_tip, ah->ar_sip, 4);
1445 if_encap(pData, ETH_P_ARP, mr, ETH_ENCAP_URG);
1446 }
1447 else
1448 m_freem(pData, mr);
1449
1450 /* Gratuitous ARP */
1451 if ( *(uint32_t *)ah->ar_sip == *(uint32_t *)ah->ar_tip
1452 && memcmp(ah->ar_tha, broadcast_ethaddr, ETH_ALEN) == 0
1453 && memcmp(eh->h_dest, broadcast_ethaddr, ETH_ALEN) == 0)
1454 {
1455 /* We've received an announcement of an address assignment,
1456 * so update the ARP cache
1457 */
1458 static bool fGratuitousArpReported;
1459 if (!fGratuitousArpReported)
1460 {
1461 LogRel(("NAT: Gratuitous ARP [IP:%R[IP4], ether:%R[ether]]\n",
1462 ah->ar_sip, ah->ar_sha));
1463 fGratuitousArpReported = true;
1464 }
1465 slirp_arp_cache_update_or_add(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]);
1466 }
1467 break;
1468
1469 case ARPOP_REPLY:
1470 slirp_arp_cache_update_or_add(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]);
1471 break;
1472
1473 default:
1474 break;
1475 }
1476
1477 m_freem(pData, m);
1478}
1479
1480/**
1481 * Feed a packet into the slirp engine.
1482 *
1483 * @param m Data buffer, m_len is not valid.
1484 * @param cbBuf The length of the data in m.
1485 */
1486void slirp_input(PNATState pData, struct mbuf *m, size_t cbBuf)
1487{
1488 int proto;
1489 static bool fWarnedIpv6;
1490 struct ethhdr *eh;
1491 uint8_t au8Ether[ETH_ALEN];
1492
1493 m->m_len = cbBuf;
1494 if (cbBuf < ETH_HLEN)
1495 {
1496 Log(("NAT: packet having size %d has been ignored\n", m->m_len));
1497 m_freem(pData, m);
1498 return;
1499 }
1500 eh = mtod(m, struct ethhdr *);
1501 proto = RT_N2H_U16(eh->h_proto);
1502
1503 memcpy(au8Ether, eh->h_source, ETH_ALEN);
1504
1505 switch(proto)
1506 {
1507 case ETH_P_ARP:
1508 arp_input(pData, m);
1509 break;
1510
1511 case ETH_P_IP:
1512 /* Update time. Important if the network is very quiet, as otherwise
1513 * the first outgoing connection gets an incorrect timestamp. */
1514 updtime(pData);
1515 m_adj(m, ETH_HLEN);
1516 M_ASSERTPKTHDR(m);
1517 m->m_pkthdr.header = mtod(m, void *);
1518 ip_input(pData, m);
1519 break;
1520
1521 case ETH_P_IPV6:
1522 m_freem(pData, m);
1523 if (!fWarnedIpv6)
1524 {
1525 LogRel(("NAT: IPv6 not supported\n"));
1526 fWarnedIpv6 = true;
1527 }
1528 break;
1529
1530 default:
1531 Log(("NAT: Unsupported protocol %x\n", proto));
1532 m_freem(pData, m);
1533 break;
1534 }
1535
1536 if (pData->cRedirectionsActive != pData->cRedirectionsStored)
1537 activate_port_forwarding(pData, au8Ether);
1538}
1539
1540/**
1541 * Output the IP packet to the ethernet device.
1542 *
1543 * @note This function will free m!
1544 */
1545void if_encap(PNATState pData, uint16_t eth_proto, struct mbuf *m, int flags)
1546{
1547 struct ethhdr *eh;
1548 uint8_t *buf = NULL;
1549 uint8_t *mbuf = NULL;
1550 size_t mlen = 0;
1551 STAM_PROFILE_START(&pData->StatIF_encap, a);
1552
1553 M_ASSERTPKTHDR(m);
1554 m->m_data -= ETH_HLEN;
1555 m->m_len += ETH_HLEN;
1556 eh = mtod(m, struct ethhdr *);
1557 mlen = m->m_len;
1558
1559 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) != 0)
1560 {
1561 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
1562 memcpy(eh->h_source, special_ethaddr, ETH_ALEN);
1563 Assert(memcmp(eh->h_dest, special_ethaddr, ETH_ALEN) != 0);
1564 if (memcmp(eh->h_dest, zerro_ethaddr, ETH_ALEN) == 0)
1565 {
1566 /* don't do anything */
1567 m_freem(pData, m);
1568 goto done;
1569 }
1570 }
1571 /*
1572 * we're processing an mbuf chain here, which isn't expected.
1573 */
1574 Assert((!m->m_next));
1575 if (m->m_next)
1576 {
1577 Log(("NAT: if_encap's recived the chain, dropping...\n"));
1578 m_freem(pData, m);
1579 goto done;
1580 }
1581 mbuf = mtod(m, uint8_t *);
1582 eh->h_proto = RT_H2N_U16(eth_proto);
1583 if (flags & ETH_ENCAP_URG)
1584 slirp_urg_output(pData->pvUser, m, mbuf, mlen);
1585 else
1586 slirp_output(pData->pvUser, m, mbuf, mlen);
1587done:
1588 STAM_PROFILE_STOP(&pData->StatIF_encap, a);
1589}
1590
1591/**
1592 * We still use the DHCP server leases to map an Ethernet address to an IP.
1593 * @todo see rt_lookup_in_cache
1594 */
1595static uint32_t find_guest_ip(PNATState pData, const uint8_t *eth_addr)
1596{
1597 uint32_t ip = INADDR_ANY;
1598 int rc;
1599
1600 if (eth_addr == NULL)
1601 return INADDR_ANY;
1602
1603 if ( memcmp(eth_addr, zerro_ethaddr, ETH_ALEN) == 0
1604 || memcmp(eth_addr, broadcast_ethaddr, ETH_ALEN) == 0)
1605 return INADDR_ANY;
1606
1607 rc = slirp_arp_lookup_ip_by_ether(pData, eth_addr, &ip);
1608 if (RT_SUCCESS(rc))
1609 return ip;
1610
1611 bootp_cache_lookup_ip_by_ether(pData, eth_addr, &ip);
1612 /* ignore return code, ip will be set to INADDR_ANY on error */
1613 return ip;
1614}
1615
1616/**
1617 * We need to check whether port forwarding has been activated
1618 * for a specific machine ... that of course relates to
1619 * service mode
1620 * @todo finish this for service case
1621 */
1622static void activate_port_forwarding(PNATState pData, const uint8_t *h_source)
1623{
1624 struct port_forward_rule *rule, *tmp;
1625
1626 /* check mac here */
1627 LIST_FOREACH_SAFE(rule, &pData->port_forward_rule_head, list, tmp)
1628 {
1629 struct socket *so;
1630 struct alias_link *alias_link;
1631 struct libalias *lib;
1632 int flags;
1633 struct sockaddr sa;
1634 struct sockaddr_in *psin;
1635 socklen_t socketlen;
1636 struct in_addr alias;
1637 int rc;
1638 uint32_t guest_addr; /* need to understand if we already give address to guest */
1639
1640 if (rule->activated)
1641 continue;
1642
1643#ifdef VBOX_WITH_NAT_SERVICE
1644 if (memcmp(rule->mac_address, h_source, ETH_ALEN) != 0)
1645 continue; /* not the right MAC; @todo it'd be better to keep the port-forwarding list per MAC */
1646 guest_addr = find_guest_ip(pData, h_source);
1647#else
1648#if 0
1649 if (memcmp(client_ethaddr, h_source, ETH_ALEN) != 0)
1650 continue;
1651#endif
1652 guest_addr = find_guest_ip(pData, h_source);
1653#endif
1654 if (guest_addr == INADDR_ANY)
1655 {
1656 /* the address wasn't granted */
1657 return;
1658 }
1659
1660#if !defined(VBOX_WITH_NAT_SERVICE)
1661 if ( rule->guest_addr.s_addr != guest_addr
1662 && rule->guest_addr.s_addr != INADDR_ANY)
1663 continue;
1664 if (rule->guest_addr.s_addr == INADDR_ANY)
1665 rule->guest_addr.s_addr = guest_addr;
1666#endif
1667
1668 LogRel(("NAT: set redirect %s host port %d => guest port %d @ %R[IP4]\n",
1669 rule->proto == IPPROTO_UDP ? "UDP" : "TCP", rule->host_port, rule->guest_port, &guest_addr));
1670
1671 if (rule->proto == IPPROTO_UDP)
1672 so = udp_listen(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1673 RT_H2N_U16(rule->guest_port), 0);
1674 else
1675 so = solisten(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1676 RT_H2N_U16(rule->guest_port), 0);
1677
1678 if (so == NULL)
1679 goto remove_port_forwarding;
1680
1681 psin = (struct sockaddr_in *)&sa;
1682 psin->sin_family = AF_INET;
1683 psin->sin_port = 0;
1684 psin->sin_addr.s_addr = INADDR_ANY;
1685 socketlen = sizeof(struct sockaddr);
1686
1687 rc = getsockname(so->s, &sa, &socketlen);
1688 if (rc < 0 || sa.sa_family != AF_INET)
1689 goto remove_port_forwarding;
1690
1691 psin = (struct sockaddr_in *)&sa;
1692
1693 lib = LibAliasInit(pData, NULL);
1694 flags = LibAliasSetMode(lib, 0, 0);
1695 flags |= pData->i32AliasMode;
1696 flags |= PKT_ALIAS_REVERSE; /* set reverse */
1697 flags = LibAliasSetMode(lib, flags, ~0);
1698
1699 alias.s_addr = RT_H2N_U32(RT_N2H_U32(guest_addr) | CTL_ALIAS);
1700 alias_link = LibAliasRedirectPort(lib, psin->sin_addr, RT_H2N_U16(rule->host_port),
1701 alias, RT_H2N_U16(rule->guest_port),
1702 pData->special_addr, -1, /* not very clear for now */
1703 rule->proto);
1704 if (!alias_link)
1705 goto remove_port_forwarding;
1706
1707 so->so_la = lib;
1708 rule->activated = 1;
1709 rule->so = so;
1710 pData->cRedirectionsActive++;
1711 continue;
1712
1713 remove_port_forwarding:
1714 LogRel(("NAT: failed to redirect %s %d => %d\n",
1715 (rule->proto == IPPROTO_UDP?"UDP":"TCP"), rule->host_port, rule->guest_port));
1716 LIST_REMOVE(rule, list);
1717 pData->cRedirectionsStored--;
1718 RTMemFree(rule);
1719 }
1720}
1721
1722/**
1723 * Since 3.1, instead of opening a new socket immediately, this call gathers
1724 * more information:
1725 * 1. bind IP
1726 * 2. host port
1727 * 3. guest port
1728 * 4. proto
1729 * 5. guest MAC address
1730 * The guest's MAC address is rather important for the service case, but we can
1731 * easily get it from the VM configuration in DrvNAT or the service; the idea is
1732 * to activate the corresponding port forwarding.
1733 */
1734int slirp_add_redirect(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1735 struct in_addr guest_addr, int guest_port, const uint8_t *ethaddr)
1736{
1737 struct port_forward_rule *rule = NULL;
1738 Assert(ethaddr);
1739 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
1740 {
1741 if ( rule->proto == (is_udp ? IPPROTO_UDP : IPPROTO_TCP)
1742 && rule->host_port == host_port
1743 && rule->bind_ip.s_addr == host_addr.s_addr
1744 && rule->guest_port == guest_port
1745 && rule->guest_addr.s_addr == guest_addr.s_addr
1746 )
1747 return 0; /* rule has been already registered */
1748 }
1749
1750 rule = RTMemAllocZ(sizeof(struct port_forward_rule));
1751 if (rule == NULL)
1752 return 1;
1753
1754 rule->proto = (is_udp ? IPPROTO_UDP : IPPROTO_TCP);
1755 rule->host_port = host_port;
1756 rule->guest_port = guest_port;
1757 rule->guest_addr.s_addr = guest_addr.s_addr;
1758 rule->bind_ip.s_addr = host_addr.s_addr;
1759 memcpy(rule->mac_address, ethaddr, ETH_ALEN);
1760 /* @todo add mac address */
1761 LIST_INSERT_HEAD(&pData->port_forward_rule_head, rule, list);
1762 pData->cRedirectionsStored++;
1763 /* activate port-forwarding if guest has already got assigned IP */
1764 if (memcmp(ethaddr, zerro_ethaddr, ETH_ALEN))
1765 activate_port_forwarding(pData, ethaddr);
1766 return 0;
1767}
1768
1769int slirp_remove_redirect(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1770 struct in_addr guest_addr, int guest_port)
1771{
1772 struct port_forward_rule *rule = NULL;
1773 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
1774 {
1775 if ( rule->proto == (is_udp ? IPPROTO_UDP : IPPROTO_TCP)
1776 && rule->host_port == host_port
1777 && rule->guest_port == guest_port
1778 && rule->bind_ip.s_addr == host_addr.s_addr
1779 && rule->guest_addr.s_addr == guest_addr.s_addr
1780 && rule->activated)
1781 {
1782 LogRel(("NAT: remove redirect %s host port %d => guest port %d @ %R[IP4]\n",
1783 rule->proto == IPPROTO_UDP ? "UDP" : "TCP", rule->host_port, rule->guest_port, &guest_addr));
1784
1785 LibAliasUninit(rule->so->so_la);
1786 if (is_udp)
1787 udp_detach(pData, rule->so);
1788 else
1789 tcp_close(pData, sototcpcb(rule->so));
1790 LIST_REMOVE(rule, list);
1791 RTMemFree(rule);
1792 pData->cRedirectionsStored--;
1793 break;
1794 }
1795
1796 }
1797 return 0;
1798}
1799
1800void slirp_set_ethaddr_and_activate_port_forwarding(PNATState pData, const uint8_t *ethaddr, uint32_t GuestIP)
1801{
1802#ifndef VBOX_WITH_NAT_SERVICE
1803 memcpy(client_ethaddr, ethaddr, ETH_ALEN);
1804#endif
1805 if (GuestIP != INADDR_ANY)
1806 {
1807 slirp_arp_cache_update_or_add(pData, GuestIP, ethaddr);
1808 activate_port_forwarding(pData, ethaddr);
1809 }
1810}
1811
1812#if defined(RT_OS_WINDOWS)
1813HANDLE *slirp_get_events(PNATState pData)
1814{
1815 return pData->phEvents;
1816}
1817void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index)
1818{
1819 pData->phEvents[index] = hEvent;
1820}
1821#endif
1822
1823unsigned int slirp_get_timeout_ms(PNATState pData)
1824{
1825 if (link_up)
1826 {
1827 if (time_fasttimo)
1828 return 2;
1829 if (do_slowtimo)
1830 return 500; /* see PR_SLOWHZ */
1831 }
1832 return 3600*1000; /* one hour */
1833}
1834
1835#ifndef RT_OS_WINDOWS
1836int slirp_get_nsock(PNATState pData)
1837{
1838 return pData->nsock;
1839}
1840#endif
1841
1842/*
1843 * This function is called from the NAT thread.
1844 */
1845void slirp_post_sent(PNATState pData, void *pvArg)
1846{
1847 struct socket *so = 0;
1848 struct tcpcb *tp = 0;
1849 struct mbuf *m = (struct mbuf *)pvArg;
1850 m_freem(pData, m);
1851}
1852#ifdef VBOX_WITH_SLIRP_MT
1853void slirp_process_queue(PNATState pData)
1854{
1855 RTReqProcess(pData->pReqQueue, RT_INDEFINITE_WAIT);
1856}
1857void *slirp_get_queue(PNATState pData)
1858{
1859 return pData->pReqQueue;
1860}
1861#endif
1862
1863void slirp_set_dhcp_TFTP_prefix(PNATState pData, const char *tftpPrefix)
1864{
1865 Log2(("tftp_prefix: %s\n", tftpPrefix));
1866 tftp_prefix = tftpPrefix;
1867}
1868
1869void slirp_set_dhcp_TFTP_bootfile(PNATState pData, const char *bootFile)
1870{
1871 Log2(("bootFile: %s\n", bootFile));
1872 bootp_filename = bootFile;
1873}
1874
1875void slirp_set_dhcp_next_server(PNATState pData, const char *next_server)
1876{
1877 Log2(("next_server: %s\n", next_server));
1878 if (next_server == NULL)
1879 pData->tftp_server.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_TFTP);
1880 else
1881 inet_aton(next_server, &pData->tftp_server);
1882}
1883
1884int slirp_set_binding_address(PNATState pData, char *addr)
1885{
1886 if (addr == NULL || (inet_aton(addr, &pData->bindIP) == 0))
1887 {
1888 pData->bindIP.s_addr = INADDR_ANY;
1889 return 1;
1890 }
1891 return 0;
1892}
1893
1894void slirp_set_dhcp_dns_proxy(PNATState pData, bool fDNSProxy)
1895{
1896 if (!pData->fUseHostResolver)
1897 {
1898 Log2(("NAT: DNS proxy switched %s\n", (fDNSProxy ? "on" : "off")));
1899 pData->fUseDnsProxy = fDNSProxy;
1900 }
1901 else
1902 LogRel(("NAT: Host Resolver conflicts with DNS proxy, the last one was forcely ignored\n"));
1903}
1904
1905#define CHECK_ARG(name, val, lim_min, lim_max) \
1906 do { \
1907 if ((val) < (lim_min) || (val) > (lim_max)) \
1908 { \
1909 LogRel(("NAT: (" #name ":%d) has been ignored, " \
1910 "because out of range (%d, %d)\n", (val), (lim_min), (lim_max))); \
1911 return; \
1912 } \
1913 else \
1914 LogRel(("NAT: (" #name ":%d)\n", (val))); \
1915 } while (0)
1916
1917/* don't allow the user to set values below 8 KiB or above 1 MiB */
1918#define _8K_1M_CHECK_ARG(name, val) CHECK_ARG(name, (val), 8, 1024)
1919void slirp_set_rcvbuf(PNATState pData, int kilobytes)
1920{
1921 _8K_1M_CHECK_ARG("SOCKET_RCVBUF", kilobytes);
1922 pData->socket_rcv = kilobytes;
1923}
1924void slirp_set_sndbuf(PNATState pData, int kilobytes)
1925{
1926 _8K_1M_CHECK_ARG("SOCKET_SNDBUF", kilobytes);
1927 pData->socket_snd = kilobytes * _1K;
1928}
1929void slirp_set_tcp_rcvspace(PNATState pData, int kilobytes)
1930{
1931 _8K_1M_CHECK_ARG("TCP_RCVSPACE", kilobytes);
1932 tcp_rcvspace = kilobytes * _1K;
1933}
1934void slirp_set_tcp_sndspace(PNATState pData, int kilobytes)
1935{
1936 _8K_1M_CHECK_ARG("TCP_SNDSPACE", kilobytes);
1937 tcp_sndspace = kilobytes * _1K;
1938}
1939
1940/*
1941 * Look up the Ethernet address for an IP in the ARP cache.
1942 * Note: it is the caller's responsibility to allocate the buffer for the result.
1943 * @returns iprt status code
1944 */
1945int slirp_arp_lookup_ether_by_ip(PNATState pData, uint32_t ip, uint8_t *ether)
1946{
1947 struct arp_cache_entry *ac;
1948
1949 if (ether == NULL)
1950 return VERR_INVALID_PARAMETER;
1951
1952 if (LIST_EMPTY(&pData->arp_cache))
1953 return VERR_NOT_FOUND;
1954
1955 LIST_FOREACH(ac, &pData->arp_cache, list)
1956 {
1957 if ( ac->ip == ip
1958 && memcmp(ac->ether, broadcast_ethaddr, ETH_ALEN) != 0)
1959 {
1960 memcpy(ether, ac->ether, ETH_ALEN);
1961 return VINF_SUCCESS;
1962 }
1963 }
1964 return VERR_NOT_FOUND;
1965}
1966
1967/*
1968 * Look up the IP for an Ethernet address in the ARP cache.
1969 * Note: it is the caller's responsibility to allocate the buffer for the result.
1970 * @returns VINF_SUCCESS if found, VERR_NOT_FOUND otherwise
1971 */
1972int slirp_arp_lookup_ip_by_ether(PNATState pData, const uint8_t *ether, uint32_t *ip)
1973{
1974 struct arp_cache_entry *ac;
1975 *ip = INADDR_ANY;
1976
1977 if (LIST_EMPTY(&pData->arp_cache))
1978 return VERR_NOT_FOUND;
1979
1980 LIST_FOREACH(ac, &pData->arp_cache, list)
1981 {
1982 if (memcmp(ether, ac->ether, ETH_ALEN) == 0)
1983 {
1984 *ip = ac->ip;
1985 return VINF_SUCCESS;
1986 }
1987 }
1988 return VERR_NOT_FOUND;
1989}
1990
1991void slirp_arp_who_has(PNATState pData, uint32_t dst)
1992{
1993 struct mbuf *m;
1994 struct ethhdr *ehdr;
1995 struct arphdr *ahdr;
1996
1997 m = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1998 if (m == NULL)
1999 {
2000 Log(("NAT: Can't alloc mbuf for ARP request\n"));
2001 return;
2002 }
2003 ehdr = mtod(m, struct ethhdr *);
2004 memset(ehdr->h_source, 0xff, ETH_ALEN);
2005 ahdr = (struct arphdr *)&ehdr[1];
2006 ahdr->ar_hrd = RT_H2N_U16_C(1);
2007 ahdr->ar_pro = RT_H2N_U16_C(ETH_P_IP);
2008 ahdr->ar_hln = ETH_ALEN;
2009 ahdr->ar_pln = 4;
2010 ahdr->ar_op = RT_H2N_U16_C(ARPOP_REQUEST);
2011 memcpy(ahdr->ar_sha, special_ethaddr, ETH_ALEN);
2012 *(uint32_t *)ahdr->ar_sip = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
2013 memset(ahdr->ar_tha, 0xff, ETH_ALEN); /*broadcast*/
2014 *(uint32_t *)ahdr->ar_tip = dst;
2015 /* warning: this must fit within the minimal mbuf size */
2016 m->m_len = sizeof(struct arphdr) + ETH_HLEN;
2017 m->m_data += ETH_HLEN;
2018 m->m_len -= ETH_HLEN;
2019 if_encap(pData, ETH_P_ARP, m, ETH_ENCAP_URG);
2020}
2021
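/**
 * Updates the ARP cache entry for the given MAC address, or adds a new entry
 * when the MAC is not yet in the cache.
 */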
2022int slirp_arp_cache_update_or_add(PNATState pData, uint32_t dst, const uint8_t *mac)
2023{
2024 if (slirp_arp_cache_update(pData, dst, mac))
2025 slirp_arp_cache_add(pData, dst, mac);
2026
2027 return 0;
2028}
2029
2030/* Updates the ARP cache.
2031 * @returns 0 - if the entry was found and updated,
2032 * 1 - if it wasn't found.
2033 */
2034int slirp_arp_cache_update(PNATState pData, uint32_t dst, const uint8_t *mac)
2035{
2036 struct arp_cache_entry *ac;
2037 if (!memcmp(mac, broadcast_ethaddr, ETH_ALEN))
2038 return 1;
2039 LIST_FOREACH(ac, &pData->arp_cache, list)
2040 {
2041 if (!memcmp(ac->ether, mac, ETH_ALEN))
2042 {
2043 ac->ip = dst;
2044 return 0;
2045 }
2046 }
2047 return 1;
2048}
2049
2050void slirp_arp_cache_add(PNATState pData, uint32_t ip, const uint8_t *ether)
2051{
2052 struct arp_cache_entry *ac = NULL;
2053 ac = RTMemAllocZ(sizeof(struct arp_cache_entry));
2054 if (ac == NULL)
2055 {
2056 Log(("NAT: Can't allocate arp cache entry\n"));
2057 return;
2058 }
2059 ac->ip = ip;
2060 if(!memcmp(ether, broadcast_ethaddr, ETH_ALEN))
2061 {
2062 static bool fBroadcastEtherAddReported;
2063 if (!fBroadcastEtherAddReported)
2064 {
2065 LogRel(("NAT: Attept to add pair [%R[ether]:%R[IP4]] was ignored\n",
2066 ether, ip));
2067 fBroadcastEtherAddReported = true;
2068 }
2069 RTMemFree(ac);
2070 return;
2071 }
2072 memcpy(ac->ether, ether, ETH_ALEN);
2073 LIST_INSERT_HEAD(&pData->arp_cache, ac, list);
2074}
2075
2076void slirp_set_mtu(PNATState pData, int mtu)
2077{
2078 if (mtu < 20 || mtu >= 16000)
2079 {
2080 LogRel(("NAT: mtu(%d) is out of range (20;16000] mtu forcely assigned to 1500\n", mtu));
2081 mtu = 1500;
2082 }
2083 /* MTU is the maximum transmission unit */
2084 if_mtu =
2085 if_mru = mtu;
2086}
2087
2088/**
2089 * Info handler.
2090 */
2091void slirp_info(PNATState pData, PCDBGFINFOHLP pHlp, const char *pszArgs)
2092{
2093 struct socket *so, *so_next;
2094 struct arp_cache_entry *ac;
2095 struct port_forward_rule *rule;
2096
2097 pHlp->pfnPrintf(pHlp, "NAT parameters: MTU=%d\n", if_mtu);
2098 pHlp->pfnPrintf(pHlp, "NAT TCP ports:\n");
2099 QSOCKET_FOREACH(so, so_next, tcp)
2100 /* { */
2101 pHlp->pfnPrintf(pHlp, " %R[natsock]\n", so);
2102 }
2103
2104 pHlp->pfnPrintf(pHlp, "NAT UDP ports:\n");
2105 QSOCKET_FOREACH(so, so_next, udp)
2106 /* { */
2107 pHlp->pfnPrintf(pHlp, " %R[natsock]\n", so);
2108 }
2109
2110 pHlp->pfnPrintf(pHlp, "NAT ARP cache:\n");
2111 LIST_FOREACH(ac, &pData->arp_cache, list)
2112 {
2113 pHlp->pfnPrintf(pHlp, " %R[IP4] %R[ether]\n", &ac->ip, &ac->ether);
2114 }
2115
2116 pHlp->pfnPrintf(pHlp, "NAT rules:\n");
2117 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
2118 {
2119 pHlp->pfnPrintf(pHlp, " %s %d => %R[IP4]:%d %c\n",
2120 rule->proto == IPPROTO_UDP ? "UDP" : "TCP",
2121 rule->host_port, &rule->guest_addr.s_addr, rule->guest_port,
2122 rule->activated ? ' ' : '*');
2123 }
2124}