VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/slirp.c@ 28346

Last change on this file since 28346 was 28346, checked in by vboxsync, 15 years ago

NAT: do transparent proxying by default. Some protocols might be confused, e.g. vbox/6524.

  • Property svn:eol-style set to native
File size: 63.6 KB
1#include "slirp.h"
2#ifdef RT_OS_OS2
3# include <paths.h>
4#endif
5
6#include <VBox/err.h>
7#include <VBox/pdmdrv.h>
8#include <iprt/assert.h>
9#include <iprt/file.h>
10#ifndef RT_OS_WINDOWS
11# include <sys/ioctl.h>
12# include <poll.h>
13#else
14# include <Winnls.h>
15# define _WINSOCK2API_
16# include <IPHlpApi.h>
17#endif
18#include <alias.h>
19
20#ifndef RT_OS_WINDOWS
21
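/*
 * The DO_ENGAGE_EVENT* macros (Unix flavour) register a socket in the pollfd
 * array: if the socket already owns a slot, the requested event bits are
 * simply OR-ed in; otherwise a new slot is claimed and poll_index advanced.
 */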
22# define DO_ENGAGE_EVENT1(so, fdset, label) \
23 do { \
24 if ( so->so_poll_index != -1 \
25 && so->s == polls[so->so_poll_index].fd) \
26 { \
27 polls[so->so_poll_index].events |= N_(fdset ## _poll); \
28 break; \
29 } \
30 AssertRelease(poll_index < (nfds)); \
31 AssertRelease(poll_index >= 0 && poll_index < (nfds)); \
32 polls[poll_index].fd = (so)->s; \
33 (so)->so_poll_index = poll_index; \
34 polls[poll_index].events = N_(fdset ## _poll); \
35 polls[poll_index].revents = 0; \
36 poll_index++; \
37 } while (0)
38
39# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
40 do { \
41 if ( so->so_poll_index != -1 \
42 && so->s == polls[so->so_poll_index].fd) \
43 { \
44 polls[so->so_poll_index].events |= \
45 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
46 break; \
47 } \
48 AssertRelease(poll_index < (nfds)); \
49 polls[poll_index].fd = (so)->s; \
50 (so)->so_poll_index = poll_index; \
51 polls[poll_index].events = \
52 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
53 poll_index++; \
54 } while (0)
55
56# define DO_POLL_EVENTS(rc, error, so, events, label) do {} while (0)
57
58/*
59 * DO_CHECK_FD_SET is used both for normal event checks and for dumping socket
60 * events, including POLLNVAL. gcc warns about attempts to log POLLNVAL, so the
61 * construction in the last two lines catches POLLNVAL while logging and
62 * returns false in case of error during normal usage.
63 */
64# define DO_CHECK_FD_SET(so, events, fdset) \
65 ( ((so)->so_poll_index != -1) \
66 && ((so)->so_poll_index <= ndfs) \
67 && ((so)->s == polls[so->so_poll_index].fd) \
68 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)) \
69 && ( N_(fdset ## _poll) == POLLNVAL \
70 || !(polls[(so)->so_poll_index].revents & POLLNVAL)))
71
72 /* specific for Unix API */
73# define DO_UNIX_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
74 /* specific for Windows Winsock API */
75# define DO_WIN_CHECK_FD_SET(so, events, fdset) 0
76
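/*
 * Map the select()-style fd_set names used throughout this file onto
 * poll(2) event flags.
 */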
77# ifndef RT_OS_LINUX
78# define readfds_poll (POLLRDNORM)
79# define writefds_poll (POLLWRNORM)
80# else
81# define readfds_poll (POLLIN)
82# define writefds_poll (POLLOUT)
83# endif
84# define xfds_poll (POLLPRI)
85# define closefds_poll (POLLHUP)
86# define rderr_poll (POLLERR)
87# define rdhup_poll (POLLHUP)
88# define nval_poll (POLLNVAL)
89
90# define ICMP_ENGAGE_EVENT(so, fdset) \
91 do { \
92 if (pData->icmp_socket.s != -1) \
93 DO_ENGAGE_EVENT1((so), fdset, ICMP); \
94 } while (0)
95
96#else /* RT_OS_WINDOWS */
97
98/*
99 * On Windows, we will be notified by IcmpSendEcho2() when the response arrives.
100 * So no call to WSAEventSelect is necessary.
101 */
102# define ICMP_ENGAGE_EVENT(so, fdset) do {} while (0)
103
104/*
105 * On Windows we use FD_ALL_EVENTS to ensure that we don't miss any event.
106 */
107# define DO_ENGAGE_EVENT1(so, fdset1, label) \
108 do { \
109 rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS); \
110 if (rc == SOCKET_ERROR) \
111 { \
112 /* This should not happen */ \
113 error = WSAGetLastError(); \
114 LogRel(("WSAEventSelect (" #label ") error %d (so=%x, socket=%s, event=%x)\n", \
115 error, (so), (so)->s, VBOX_SOCKET_EVENT)); \
116 } \
117 } while (0); \
118 CONTINUE(label)
119
120# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
121 DO_ENGAGE_EVENT1((so), (fdset1), label)
122
123# define DO_POLL_EVENTS(rc, error, so, events, label) \
124 (rc) = WSAEnumNetworkEvents((so)->s, VBOX_SOCKET_EVENT, (events)); \
125 if ((rc) == SOCKET_ERROR) \
126 { \
127 (error) = WSAGetLastError(); \
128 LogRel(("WSAEnumNetworkEvents " #label " error %d\n", (error))); \
129 CONTINUE(label); \
130 }
131
132# define acceptds_win FD_ACCEPT
133# define acceptds_win_bit FD_ACCEPT_BIT
134# define readfds_win FD_READ
135# define readfds_win_bit FD_READ_BIT
136# define writefds_win FD_WRITE
137# define writefds_win_bit FD_WRITE_BIT
138# define xfds_win FD_OOB
139# define xfds_win_bit FD_OOB_BIT
140# define closefds_win FD_CLOSE
141# define closefds_win_bit FD_CLOSE_BIT
142
146# define DO_CHECK_FD_SET(so, events, fdset) \
147 (((events).lNetworkEvents & fdset ## _win) && ((events).iErrorCode[fdset ## _win_bit] == 0))
148
149# define DO_WIN_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
150# define DO_UNIX_CHECK_FD_SET(so, events, fdset) 1 /*specific for Unix API */
151
152#endif /* RT_OS_WINDOWS */
153
154#define TCP_ENGAGE_EVENT1(so, fdset) \
155 DO_ENGAGE_EVENT1((so), fdset, tcp)
156
157#define TCP_ENGAGE_EVENT2(so, fdset1, fdset2) \
158 DO_ENGAGE_EVENT2((so), fdset1, fdset2, tcp)
159
160#define UDP_ENGAGE_EVENT(so, fdset) \
161 DO_ENGAGE_EVENT1((so), fdset, udp)
162
163#define POLL_TCP_EVENTS(rc, error, so, events) \
164 DO_POLL_EVENTS((rc), (error), (so), (events), tcp)
165
166#define POLL_UDP_EVENTS(rc, error, so, events) \
167 DO_POLL_EVENTS((rc), (error), (so), (events), udp)
168
169#define CHECK_FD_SET(so, events, set) \
170 (DO_CHECK_FD_SET((so), (events), set))
171
172#define WIN_CHECK_FD_SET(so, events, set) \
173 (DO_WIN_CHECK_FD_SET((so), (events), set))
174
175#define UNIX_CHECK_FD_SET(so, events, set) \
176 (DO_UNIX_CHECK_FD_SET(so, events, set))
177
178/*
179 * Logging macros
180 */
181#if VBOX_WITH_DEBUG_NAT_SOCKETS
182# if defined(RT_OS_WINDOWS)
183# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
184 do { \
185 LogRel((" " #proto " %R[natsock] %R[natwinnetevents]\n", (so), (winevent))); \
186 } while (0)
187# else /* !RT_OS_WINDOWS */
188# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
189 do { \
190 LogRel((" " #proto " %R[natsock] %s %s %s er: %s, %s, %s\n", (so), \
191 CHECK_FD_SET(so, ign ,r_fdset) ? "READ":"", \
192 CHECK_FD_SET(so, ign, w_fdset) ? "WRITE":"", \
193 CHECK_FD_SET(so, ign, x_fdset) ? "OOB":"", \
194 CHECK_FD_SET(so, ign, rderr) ? "RDERR":"", \
195 CHECK_FD_SET(so, ign, rdhup) ? "RDHUP":"", \
196 CHECK_FD_SET(so, ign, nval) ? "RDNVAL":"")); \
197 } while (0)
198# endif /* !RT_OS_WINDOWS */
199#else /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
200# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) do {} while (0)
201#endif /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
202
203#define LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
204 DO_LOG_NAT_SOCK((so), proto, (winevent), r_fdset, w_fdset, x_fdset)
205
206static void activate_port_forwarding(PNATState, const uint8_t *pEther);
207
208static const uint8_t special_ethaddr[6] =
209{
210 0x52, 0x54, 0x00, 0x12, 0x35, 0x00
211};
212
213static const uint8_t broadcast_ethaddr[6] =
214{
215 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
216};
217
218const uint8_t zerro_ethaddr[6] =
219{
220 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
221};
222
223#ifdef RT_OS_WINDOWS
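/**
 * Windows flavour: walks the adapter list returned by GetAdaptersAddresses()
 * and collects the DNS server addresses and DNS suffixes of all interfaces
 * that are up into pData->pDnsList and pData->pDomainList.
 */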
224static int get_dns_addr_domain(PNATState pData, bool fVerbose,
225 struct in_addr *pdns_addr,
226 const char **ppszDomain)
227{
228 ULONG flags = GAA_FLAG_INCLUDE_PREFIX; /*GAA_FLAG_INCLUDE_ALL_INTERFACES;*/ /* all interfaces registered in NDIS */
229 PIP_ADAPTER_ADDRESSES pAdapterAddr = NULL;
230 PIP_ADAPTER_ADDRESSES pAddr = NULL;
231 PIP_ADAPTER_DNS_SERVER_ADDRESS pDnsAddr = NULL;
232 ULONG size;
233 int wlen = 0;
234 char *pszSuffix;
235 struct dns_domain_entry *pDomain = NULL;
236 ULONG ret = ERROR_SUCCESS;
237
238 /* @todo add SKIP flags to fetch only the required information */
239
240 /* determine size of buffer */
241 size = 0;
242 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
243 if (ret != ERROR_BUFFER_OVERFLOW)
244 {
245 LogRel(("NAT: error %lu occurred on capacity detection operation\n", ret));
246 return -1;
247 }
248 if (size == 0)
249 {
250 LogRel(("NAT: Win socket API returns non capacity\n"));
251 return -1;
252 }
253
254 pAdapterAddr = RTMemAllocZ(size);
255 if (!pAdapterAddr)
256 {
257 LogRel(("NAT: No memory available \n"));
258 return -1;
259 }
260 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
261 if (ret != ERROR_SUCCESS)
262 {
263 LogRel(("NAT: error %lu occurred on fetching adapters info\n", ret));
264 RTMemFree(pAdapterAddr);
265 return -1;
266 }
267
268 for (pAddr = pAdapterAddr; pAddr != NULL; pAddr = pAddr->Next)
269 {
270 int found;
271 if (pAddr->OperStatus != IfOperStatusUp)
272 continue;
273
274 for (pDnsAddr = pAddr->FirstDnsServerAddress; pDnsAddr != NULL; pDnsAddr = pDnsAddr->Next)
275 {
276 struct sockaddr *SockAddr = pDnsAddr->Address.lpSockaddr;
277 struct in_addr InAddr;
278 struct dns_entry *pDns;
279
280 if (SockAddr->sa_family != AF_INET)
281 continue;
282
283 InAddr = ((struct sockaddr_in *)SockAddr)->sin_addr;
284
285 /* add dns server to list */
286 pDns = RTMemAllocZ(sizeof(struct dns_entry));
287 if (!pDns)
288 {
289 LogRel(("NAT: Can't allocate buffer for DNS entry\n"));
290 RTMemFree(pAdapterAddr);
291 return VERR_NO_MEMORY;
292 }
293
294 LogRel(("NAT: adding %R[IP4] to DNS server list\n", &InAddr));
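 /* A DNS server on the host's loopback network is not directly reachable
  * from the guest, so remap it to the NAT alias address
  * (special_addr | CTL_ALIAS). */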
295 if ((InAddr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
296 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
297 else
298 pDns->de_addr.s_addr = InAddr.s_addr;
299
300 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
301
302 if (pAddr->DnsSuffix == NULL)
303 continue;
304
305 /* uniq */
306 RTUtf16ToUtf8(pAddr->DnsSuffix, &pszSuffix);
307 if (!pszSuffix || strlen(pszSuffix) == 0)
308 {
309 RTStrFree(pszSuffix);
310 continue;
311 }
312
313 found = 0;
314 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
315 {
316 if ( pDomain->dd_pszDomain != NULL
317 && strcmp(pDomain->dd_pszDomain, pszSuffix) == 0)
318 {
319 found = 1;
320 RTStrFree(pszSuffix);
321 break;
322 }
323 }
324 if (!found)
325 {
326 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
327 if (!pDomain)
328 {
329 LogRel(("NAT: not enough memory\n"));
330 RTStrFree(pszSuffix);
331 RTMemFree(pAdapterAddr);
332 return VERR_NO_MEMORY;
333 }
334 pDomain->dd_pszDomain = pszSuffix;
335 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
336 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
337 }
338 }
339 }
340 RTMemFree(pAdapterAddr);
341 return 0;
342}
343
344#else /* !RT_OS_WINDOWS */
345
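/**
 * Reads one line (terminated by CR or LF) from the file into pvBuf, replacing
 * the line terminator with a NUL and returning the number of bytes read in
 * *pcbRead. Returns VERR_EOF at end of file.
 */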
346static int RTFileGets(RTFILE File, void *pvBuf, size_t cbBufSize, size_t *pcbRead)
347{
348 size_t cbRead;
349 char bTest;
350 int rc = VERR_NO_MEMORY;
351 char *pu8Buf = (char *)pvBuf;
352 *pcbRead = 0;
353
354 while ( RT_SUCCESS(rc = RTFileRead(File, &bTest, 1, &cbRead))
355 && (pu8Buf - (char *)pvBuf) < cbBufSize)
356 {
357 if (cbRead == 0)
358 return VERR_EOF;
359
360 if (bTest == '\r' || bTest == '\n')
361 {
362 *pu8Buf = 0;
363 return VINF_SUCCESS;
364 }
365 *pu8Buf = bTest;
366 pu8Buf++;
367 (*pcbRead)++;
368 }
369 return rc;
370}
371
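/**
 * Unix flavour: parses resolv.conf (the exact path differs per platform) and
 * fills pData->pDnsList from the "nameserver" entries and pData->pDomainList
 * from the "domain"/"search" entries.
 */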
372static int get_dns_addr_domain(PNATState pData, bool fVerbose,
373 struct in_addr *pdns_addr,
374 const char **ppszDomain)
375{
376 char buff[512];
377 char buff2[256];
378 RTFILE f;
379 int fFoundNameserver = 0;
380 struct in_addr tmp_addr;
381 int rc;
382 size_t bytes;
383
384# ifdef RT_OS_OS2
385 /* Try various locations. */
386 char *etc = getenv("ETC");
387 if (etc)
388 {
389 RTStrmPrintf(buff, sizeof(buff), "%s/RESOLV2", etc);
390 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
391 }
392 if (RT_FAILURE(rc))
393 {
394 RTStrmPrintf(buff, sizeof(buff), "%s/RESOLV2", _PATH_ETC);
395 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
396 }
397 if (RT_FAILURE(rc))
398 {
399 RTStrmPrintf(buff, sizeof(buff), "%s/resolv.conf", _PATH_ETC);
400 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
401 }
402# else /* !RT_OS_OS2 */
403# ifndef DEBUG_vvl
404 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
405# else
406 char *home = getenv("HOME");
407 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", home);
408 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
409 if (RT_SUCCESS(rc))
410 {
411 Log(("NAT: DNS we're using %s\n", buff));
412 }
413 else
414 {
415 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
416 Log(("NAT: DNS we're using %s\n", buff));
417 }
418# endif
419# endif /* !RT_OS_OS2 */
420 if (RT_FAILURE(rc))
421 return -1;
422
423 if (ppszDomain)
424 *ppszDomain = NULL;
425
426 Log(("NAT: DNS Servers:\n"));
427 while ( RT_SUCCESS(rc = RTFileGets(f, buff, 512, &bytes))
428 && rc != VERR_EOF)
429 {
430 struct dns_entry *pDns = NULL;
431 if (sscanf(buff, "nameserver%*[ \t]%256s", buff2) == 1)
432 {
433 if (!inet_aton(buff2, &tmp_addr))
434 continue;
435
436 /* localhost mask */
437 pDns = RTMemAllocZ(sizeof (struct dns_entry));
438 if (!pDns)
439 {
440 LogRel(("can't alloc memory for DNS entry\n"));
441 return -1;
442 }
443
444 /* check */
445 pDns->de_addr.s_addr = tmp_addr.s_addr;
446 if ((pDns->de_addr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
447 {
448 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
449 }
450 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
451 fFoundNameserver++;
452 }
453 if ((!strncmp(buff, "domain", 6) || !strncmp(buff, "search", 6)))
454 {
455 char *tok;
456 char *saveptr;
457 struct dns_domain_entry *pDomain = NULL;
458 int fFoundDomain = 0;
459 tok = strtok_r(&buff[6], " \t\n", &saveptr);
460 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
461 {
462 if ( tok != NULL
463 && strcmp(tok, pDomain->dd_pszDomain) == 0)
464 {
465 fFoundDomain = 1;
466 break;
467 }
468 }
469 if (tok != NULL && !fFoundDomain)
470 {
471 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
472 if (!pDomain)
473 {
474 LogRel(("NAT: not enought memory to add domain list\n"));
475 return VERR_NO_MEMORY;
476 }
477 pDomain->dd_pszDomain = RTStrDup(tok);
478 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
479 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
480 }
481 }
482 }
483 RTFileClose(f);
484 if (!fFoundNameserver)
485 return -1;
486 return 0;
487}
488
489#endif /* !RT_OS_WINDOWS */
490
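/** Initializes the DNS server and search-domain lists from the host configuration. */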
491static int slirp_init_dns_list(PNATState pData)
492{
493 TAILQ_INIT(&pData->pDnsList);
494 LIST_INIT(&pData->pDomainList);
495 return get_dns_addr_domain(pData, true, NULL, NULL);
496}
497
498static void slirp_release_dns_list(PNATState pData)
499{
500 struct dns_entry *pDns = NULL;
501 struct dns_domain_entry *pDomain = NULL;
502
503 while (!TAILQ_EMPTY(&pData->pDnsList))
504 {
505 pDns = TAILQ_FIRST(&pData->pDnsList);
506 TAILQ_REMOVE(&pData->pDnsList, pDns, de_list);
507 RTMemFree(pDns);
508 }
509
510 while (!LIST_EMPTY(&pData->pDomainList))
511 {
512 pDomain = LIST_FIRST(&pData->pDomainList);
513 LIST_REMOVE(pDomain, dd_list);
514 if (pDomain->dd_pszDomain != NULL)
515 RTStrFree(pDomain->dd_pszDomain);
516 RTMemFree(pDomain);
517 }
518}
519
520int get_dns_addr(PNATState pData, struct in_addr *pdns_addr)
521{
522 return get_dns_addr_domain(pData, false, pdns_addr, NULL);
523}
524
525#ifndef VBOX_WITH_NAT_SERVICE
526int slirp_init(PNATState *ppData, const char *pszNetAddr, uint32_t u32Netmask,
527 bool fPassDomain, bool fUseHostResolver, void *pvUser)
528#else
529int slirp_init(PNATState *ppData, uint32_t u32NetAddr, uint32_t u32Netmask,
530 bool fPassDomain, bool fUseHostResolver, void *pvUser)
531#endif
532{
533 int fNATfailed = 0;
534 int rc;
535 PNATState pData = RTMemAllocZ(sizeof(NATState));
536 *ppData = pData;
537 if (!pData)
538 return VERR_NO_MEMORY;
539 if (u32Netmask & 0x1f)
540 /* CTL is x.x.x.15, bootp passes up to 16 IPs (15..31) */
541 return VERR_INVALID_PARAMETER;
542 pData->fPassDomain = !fUseHostResolver ? fPassDomain : false;
543 pData->use_host_resolver = fUseHostResolver;
544 pData->pvUser = pvUser;
545 pData->netmask = u32Netmask;
546
547 /* sockets & TCP defaults */
548 pData->socket_rcv = 64 * _1K;
549 pData->socket_snd = 64 * _1K;
550 tcp_sndspace = 64 * _1K;
551 tcp_rcvspace = 64 * _1K;
552
553#ifdef RT_OS_WINDOWS
554 {
555 WSADATA Data;
556 WSAStartup(MAKEWORD(2, 0), &Data);
557 }
558 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
559#endif
560#ifdef VBOX_WITH_SLIRP_MT
561 QSOCKET_LOCK_CREATE(tcb);
562 QSOCKET_LOCK_CREATE(udb);
563 rc = RTReqCreateQueue(&pData->pReqQueue);
564 AssertReleaseRC(rc);
565#endif
566
567 link_up = 1;
568
569 rc = bootp_dhcp_init(pData);
570 if (rc != 0)
571 {
572 LogRel(("NAT: DHCP server initialization was failed\n"));
573 return VINF_NAT_DNS;
574 }
575 debug_init();
576 if_init(pData);
577 ip_init(pData);
578 icmp_init(pData);
579
580 /* Initialise mbufs *after* setting the MTU */
581#ifndef VBOX_WITH_SLIRP_BSD_MBUF
582 m_init(pData);
583#else
584 mbuf_init(pData);
585#endif
586
587#ifndef VBOX_WITH_NAT_SERVICE
588 inet_aton(pszNetAddr, &pData->special_addr);
589#else
590 pData->special_addr.s_addr = u32NetAddr;
591#endif
592 pData->slirp_ethaddr = &special_ethaddr[0];
593 alias_addr.s_addr = pData->special_addr.s_addr | RT_H2N_U32_C(CTL_ALIAS);
594 /* @todo: add ability to configure this stuff */
595
596 /* set default addresses */
597 inet_aton("127.0.0.1", &loopback_addr);
598 if (!pData->use_host_resolver)
599 {
600 if (slirp_init_dns_list(pData) < 0)
601 fNATfailed = 1;
602
603 dnsproxy_init(pData);
604 }
605
606 getouraddr(pData);
607 {
608 int flags = 0;
609 struct in_addr proxy_addr;
610 pData->proxy_alias = LibAliasInit(pData, NULL);
611 if (pData->proxy_alias == NULL)
612 {
613 LogRel(("NAT: LibAlias default rule wasn't initialized\n"));
614 AssertMsgFailed(("NAT: LibAlias default rule wasn't initialized\n"));
615 }
616 flags = LibAliasSetMode(pData->proxy_alias, 0, 0);
617#ifndef NO_FW_PUNCH
618 flags |= PKT_ALIAS_PUNCH_FW;
619#endif
620 flags |= PKT_ALIAS_PROXY_ONLY; /* do transparent proxying */
621 flags |= PKT_ALIAS_LOG; /* set logging */
622 flags = LibAliasSetMode(pData->proxy_alias, flags, ~0);
623 proxy_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
624 LibAliasSetAddress(pData->proxy_alias, proxy_addr);
625 ftp_alias_load(pData);
626 nbt_alias_load(pData);
627 if (pData->use_host_resolver)
628 dns_alias_load(pData);
629 }
630 return fNATfailed ? VINF_NAT_DNS : VINF_SUCCESS;
631}
632
633/**
634 * Register statistics.
635 */
636void slirp_register_statistics(PNATState pData, PPDMDRVINS pDrvIns)
637{
638#ifdef VBOX_WITH_STATISTICS
639# define PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
640# define COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
641# include "counters.h"
642# undef COUNTER
643/** @todo register statistics for the variables dumped by:
644 * ipstats(pData); tcpstats(pData); udpstats(pData); icmpstats(pData);
645 * mbufstats(pData); sockstats(pData); */
646#endif /* VBOX_WITH_STATISTICS */
647}
648
649/**
650 * Deregister statistics.
651 */
652void slirp_deregister_statistics(PNATState pData, PPDMDRVINS pDrvIns)
653{
654 if (pData == NULL)
655 return;
656#ifdef VBOX_WITH_STATISTICS
657# define PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
658# define COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
659# include "counters.h"
660#endif /* VBOX_WITH_STATISTICS */
661}
662
663/**
664 * Marks the link as up, making it possible to establish new connections.
665 */
666void slirp_link_up(PNATState pData)
667{
668 struct arp_cache_entry *ac;
669 link_up = 1;
670
671 if (LIST_EMPTY(&pData->arp_cache))
672 return;
673
674 LIST_FOREACH(ac, &pData->arp_cache, list)
675 {
676 activate_port_forwarding(pData, ac->ether);
677 }
678}
679
680/**
681 * Marks the link as down and cleans up the current connections.
682 */
683void slirp_link_down(PNATState pData)
684{
685 struct socket *so;
686 struct port_forward_rule *rule;
687
688 while ((so = tcb.so_next) != &tcb)
689 {
690 if (so->so_state & SS_NOFDREF || so->s == -1)
691 sofree(pData, so);
692 else
693 tcp_drop(pData, sototcpcb(so), 0);
694 }
695
696 while ((so = udb.so_next) != &udb)
697 udp_detach(pData, so);
698
699 /*
700 * Clear the active state of port-forwarding rules to force
701 * re-setup on restoration of communications.
702 */
703 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
704 {
705 rule->activated = 0;
706 }
707 pData->cRedirectionsActive = 0;
708
709 link_up = 0;
710}
711
712/**
713 * Terminates the slirp component.
714 */
715void slirp_term(PNATState pData)
716{
717 if (pData == NULL)
718 return;
719#ifdef RT_OS_WINDOWS
720 pData->pfIcmpCloseHandle(pData->icmp_socket.sh);
721 FreeLibrary(pData->hmIcmpLibrary);
722 RTMemFree(pData->pvIcmpBuffer);
723#else
724 closesocket(pData->icmp_socket.s);
725#endif
726
727 slirp_link_down(pData);
728 slirp_release_dns_list(pData);
729 ftp_alias_unload(pData);
730 nbt_alias_unload(pData);
731 if (pData->use_host_resolver)
732 dns_alias_unload(pData);
733 while (!LIST_EMPTY(&instancehead))
734 {
735 struct libalias *la = LIST_FIRST(&instancehead);
736 /* libalias do all clean up */
737 LibAliasUninit(la);
738 }
739 while (!LIST_EMPTY(&pData->arp_cache))
740 {
741 struct arp_cache_entry *ac = LIST_FIRST(&pData->arp_cache);
742 LIST_REMOVE(ac, list);
743 RTMemFree(ac);
744 }
745 bootp_dhcp_fini(pData);
746 m_fini(pData);
747#ifdef RT_OS_WINDOWS
748 WSACleanup();
749#endif
750#ifdef LOG_ENABLED
751 Log(("\n"
752 "NAT statistics\n"
753 "--------------\n"
754 "\n"));
755 ipstats(pData);
756 tcpstats(pData);
757 udpstats(pData);
758 icmpstats(pData);
759 mbufstats(pData);
760 sockstats(pData);
761 Log(("\n"
762 "\n"
763 "\n"));
764#endif
765 RTMemFree(pData);
766}
767
768
769#define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
770#define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
771
772/*
773 * curtime kept to an accuracy of 1ms
774 */
775static void updtime(PNATState pData)
776{
777#ifdef RT_OS_WINDOWS
778 struct _timeb tb;
779
780 _ftime(&tb);
781 curtime = (u_int)tb.time * (u_int)1000;
782 curtime += (u_int)tb.millitm;
783#else
784 gettimeofday(&tt, 0);
785
786 curtime = (u_int)tt.tv_sec * (u_int)1000;
787 curtime += (u_int)tt.tv_usec / (u_int)1000;
788
789 if ((tt.tv_usec % 1000) >= 500)
790 curtime++;
791#endif
792}
793
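/**
 * Collects the sockets to wait on: on Unix hosts each interesting socket is
 * added to the pollfd array, on Windows hosts it is registered with
 * WSAEventSelect() against the shared VBOX_SOCKET_EVENT.
 */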
794#ifdef RT_OS_WINDOWS
795void slirp_select_fill(PNATState pData, int *pnfds)
796#else /* RT_OS_WINDOWS */
797void slirp_select_fill(PNATState pData, int *pnfds, struct pollfd *polls)
798#endif /* !RT_OS_WINDOWS */
799{
800 struct socket *so, *so_next;
801 int nfds;
802#if defined(RT_OS_WINDOWS)
803 int rc;
804 int error;
805#else
806 int poll_index = 0;
807#endif
808 int i;
809
810 STAM_PROFILE_START(&pData->StatFill, a);
811
812 nfds = *pnfds;
813
814 /*
815 * First, TCP sockets
816 */
817 do_slowtimo = 0;
818 if (!link_up)
819 goto done;
820
821 /*
822 * *_slowtimo needs calling if there are IP fragments
823 * in the fragment queue, or there are TCP connections active
824 */
825 /* XXX:
826 * triggering of fragment expiration should stay the same but use the new macros
827 */
828 do_slowtimo = (tcb.so_next != &tcb);
829 if (!do_slowtimo)
830 {
831 for (i = 0; i < IPREASS_NHASH; i++)
832 {
833 if (!TAILQ_EMPTY(&ipq[i]))
834 {
835 do_slowtimo = 1;
836 break;
837 }
838 }
839 }
840 /* always add the ICMP socket */
841#ifndef RT_OS_WINDOWS
842 pData->icmp_socket.so_poll_index = -1;
843#endif
844 ICMP_ENGAGE_EVENT(&pData->icmp_socket, readfds);
845
846 STAM_COUNTER_RESET(&pData->StatTCP);
847 STAM_COUNTER_RESET(&pData->StatTCPHot);
848
849 QSOCKET_FOREACH(so, so_next, tcp)
850 /* { */
851#if !defined(RT_OS_WINDOWS)
852 so->so_poll_index = -1;
853#endif
854#ifndef VBOX_WITH_SLIRP_BSD_MBUF
855 if (pData->fmbuf_water_line == 1)
856 {
857 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
858 {
859 pData->fmbuf_water_warn_sent = 0;
860 pData->fmbuf_water_line = 0;
861 }
862# ifndef RT_OS_WINDOWS
863 poll_index = 0;
864# endif
865 goto done;
866 }
867#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
868 STAM_COUNTER_INC(&pData->StatTCP);
869
870 /*
871 * See if we need a tcp_fasttimo
872 */
873 if ( time_fasttimo == 0
874 && so->so_tcpcb != NULL
875 && so->so_tcpcb->t_flags & TF_DELACK)
876 {
877 time_fasttimo = curtime; /* Flag when we want a fasttimo */
878 }
879
880 /*
881 * NOFDREF can include still connecting to local-host,
882 * newly socreated() sockets etc. Don't want to select these.
883 */
884 if (so->so_state & SS_NOFDREF || so->s == -1)
885 CONTINUE(tcp);
886
887 /*
888 * Set for reading sockets which are accepting
889 */
890 if (so->so_state & SS_FACCEPTCONN)
891 {
892 STAM_COUNTER_INC(&pData->StatTCPHot);
893 TCP_ENGAGE_EVENT1(so, readfds);
894 CONTINUE(tcp);
895 }
896
897 /*
898 * Set for writing sockets which are connecting
899 */
900 if (so->so_state & SS_ISFCONNECTING)
901 {
902 Log2(("connecting %R[natsock] engaged\n",so));
903 STAM_COUNTER_INC(&pData->StatTCPHot);
904 TCP_ENGAGE_EVENT1(so, writefds);
905 }
906
907 /*
908 * Set for writing if we are connected, can send more, and
909 * we have something to send
910 */
911 if (CONN_CANFSEND(so) && so->so_rcv.sb_cc)
912 {
913 STAM_COUNTER_INC(&pData->StatTCPHot);
914 TCP_ENGAGE_EVENT1(so, writefds);
915 }
916
917 /*
918 * Set for reading (and urgent data) if we are connected, can
919 * receive more, and we have room for it XXX /2 ?
920 */
921 if (CONN_CANFRCV(so) && (so->so_snd.sb_cc < (so->so_snd.sb_datalen/2)))
922 {
923 STAM_COUNTER_INC(&pData->StatTCPHot);
924 TCP_ENGAGE_EVENT2(so, readfds, xfds);
925 }
926 LOOP_LABEL(tcp, so, so_next);
927 }
928
929 /*
930 * UDP sockets
931 */
932 STAM_COUNTER_RESET(&pData->StatUDP);
933 STAM_COUNTER_RESET(&pData->StatUDPHot);
934
935 QSOCKET_FOREACH(so, so_next, udp)
936 /* { */
937
938#ifndef VBOX_WITH_SLIRP_BSD_MBUF
939 if (pData->fmbuf_water_line == 1)
940 {
941 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
942 {
943 pData->fmbuf_water_line = 0;
944 pData->fmbuf_water_warn_sent = 0;
945 }
946# ifndef RT_OS_WINDOWS
947 poll_index = 0;
948# endif
949 goto done;
950 }
951#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
952 STAM_COUNTER_INC(&pData->StatUDP);
953#if !defined(RT_OS_WINDOWS)
954 so->so_poll_index = -1;
955#endif
956
957 /*
958 * See if it's timed out
959 */
960 if (so->so_expire)
961 {
962 if (so->so_expire <= curtime)
963 {
964 Log2(("NAT: %R[natsock] expired\n", so));
965 if (so->so_timeout != NULL)
966 {
967 so->so_timeout(pData, so, so->so_timeout_arg);
968 }
969#ifdef VBOX_WITH_SLIRP_MT
970 /* we need so_next to continue our loop */
971 so_next = so->so_next;
972#endif
973 UDP_DETACH(pData, so, so_next);
974 CONTINUE_NO_UNLOCK(udp);
975 }
976 else
977 {
978 do_slowtimo = 1; /* Let socket expire */
979 }
980 }
981
982 /*
983 * When UDP packets are received from over the link, they're
984 * sendto()'d straight away, so no need for setting for writing
985 * Limit the number of packets queued by this session to 4.
986 * Note that even though we try and limit this to 4 packets,
987 * the session could have more queued if the packets needed
988 * to be fragmented.
989 *
990 * (XXX <= 4 ?)
991 */
992 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4)
993 {
994 STAM_COUNTER_INC(&pData->StatUDPHot);
995 UDP_ENGAGE_EVENT(so, readfds);
996 }
997 LOOP_LABEL(udp, so, so_next);
998 }
999done:
1000
1001#if defined(RT_OS_WINDOWS)
1002 *pnfds = VBOX_EVENT_COUNT;
1003#else /* RT_OS_WINDOWS */
1004 AssertRelease(poll_index <= *pnfds);
1005 *pnfds = poll_index;
1006#endif /* !RT_OS_WINDOWS */
1007
1008 STAM_PROFILE_STOP(&pData->StatFill, a);
1009}
1010
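/**
 * Services the sockets reported as ready by the preceding poll/wait: runs the
 * TCP fast and slow timers, handles the ICMP socket, then walks the TCP and
 * UDP socket lists.
 */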
1011#if defined(RT_OS_WINDOWS)
1012void slirp_select_poll(PNATState pData, int fTimeout, int fIcmp)
1013#else /* RT_OS_WINDOWS */
1014void slirp_select_poll(PNATState pData, struct pollfd *polls, int ndfs)
1015#endif /* !RT_OS_WINDOWS */
1016{
1017 struct socket *so, *so_next;
1018 int ret;
1019#if defined(RT_OS_WINDOWS)
1020 WSANETWORKEVENTS NetworkEvents;
1021 int rc;
1022 int error;
1023#else
1024 int poll_index = 0;
1025#endif
1026
1027 STAM_PROFILE_START(&pData->StatPoll, a);
1028
1029 /* Update time */
1030 updtime(pData);
1031
1032 /*
1033 * See if anything has timed out
1034 */
1035 if (link_up)
1036 {
1037 if (time_fasttimo && ((curtime - time_fasttimo) >= 2))
1038 {
1039 STAM_PROFILE_START(&pData->StatFastTimer, b);
1040 tcp_fasttimo(pData);
1041 time_fasttimo = 0;
1042 STAM_PROFILE_STOP(&pData->StatFastTimer, b);
1043 }
1044 if (do_slowtimo && ((curtime - last_slowtimo) >= 499))
1045 {
1046 STAM_PROFILE_START(&pData->StatSlowTimer, c);
1047 ip_slowtimo(pData);
1048 tcp_slowtimo(pData);
1049 last_slowtimo = curtime;
1050 STAM_PROFILE_STOP(&pData->StatSlowTimer, c);
1051 }
1052 }
1053#if defined(RT_OS_WINDOWS)
1054 if (fTimeout)
1055 return; /* only timer update */
1056#endif
1057
1058 /*
1059 * Check sockets
1060 */
1061 if (!link_up)
1062 goto done;
1063#if defined(RT_OS_WINDOWS)
1064 /* XXX: before renaming, please see the definition of
1065 * fIcmp in slirp_state.h
1066 */
1067 if (fIcmp)
1068 sorecvfrom(pData, &pData->icmp_socket);
1069#else
1070 if ( (pData->icmp_socket.s != -1)
1071 && CHECK_FD_SET(&pData->icmp_socket, ignored, readfds))
1072 sorecvfrom(pData, &pData->icmp_socket);
1073#endif
1074 /*
1075 * Check TCP sockets
1076 */
1077 QSOCKET_FOREACH(so, so_next, tcp)
1078 /* { */
1079#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1080 if (pData->fmbuf_water_line == 1)
1081 {
1082 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
1083 {
1084 pData->fmbuf_water_line = 0;
1085 pData->fmbuf_water_warn_sent = 0;
1086 }
1087 goto done;
1088 }
1089#endif
1090
1091#ifdef VBOX_WITH_SLIRP_MT
1092 if ( so->so_state & SS_NOFDREF
1093 && so->so_deleted == 1)
1094 {
1095 struct socket *son, *sop = NULL;
1096 QSOCKET_LOCK(tcb);
1097 if (so->so_next != NULL)
1098 {
1099 if (so->so_next != &tcb)
1100 SOCKET_LOCK(so->so_next);
1101 son = so->so_next;
1102 }
1103 if ( so->so_prev != &tcb
1104 && so->so_prev != NULL)
1105 {
1106 SOCKET_LOCK(so->so_prev);
1107 sop = so->so_prev;
1108 }
1109 QSOCKET_UNLOCK(tcb);
1110 remque(pData, so);
1111 NSOCK_DEC();
1112 SOCKET_UNLOCK(so);
1113 SOCKET_LOCK_DESTROY(so);
1114 RTMemFree(so);
1115 so_next = son;
1116 if (sop != NULL)
1117 SOCKET_UNLOCK(sop);
1118 CONTINUE_NO_UNLOCK(tcp);
1119 }
1120#endif
1121 /*
1122 * FD_ISSET is meaningless on these sockets
1123 * (and they can crash the program)
1124 */
1125 if (so->so_state & SS_NOFDREF || so->s == -1)
1126 CONTINUE(tcp);
1127
1128 POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);
1129
1130 LOG_NAT_SOCK(so, TCP, &NetworkEvents, readfds, writefds, xfds);
1131
1132
1133 /*
1134 * Check for URG data
1135 * This will soread as well, so no need to
1136 * test for readfds below if this succeeds
1137 */
1138
1139 /* out-of-band data */
1140 if ( CHECK_FD_SET(so, NetworkEvents, xfds)
1141#ifdef RT_OS_DARWIN
1142 /* Darwin (and probably other BSD hosts) generates a POLLPRI|POLLHUP event on receiving TCP.flags.{ACK|URG|FIN};
1143 * on other Unix hosts this combination doesn't enter this branch.
1144 */
1145 && !CHECK_FD_SET(so, NetworkEvents, closefds)
1146#endif
1147 )
1148 {
1149 sorecvoob(pData, so);
1150 }
1151
1152 /*
1153 * Check sockets for reading
1154 */
1155 else if ( CHECK_FD_SET(so, NetworkEvents, readfds)
1156 || WIN_CHECK_FD_SET(so, NetworkEvents, acceptds))
1157 {
1158 /*
1159 * Check for incoming connections
1160 */
1161 if (so->so_state & SS_FACCEPTCONN)
1162 {
1163 TCP_CONNECT(pData, so);
1164 if (!CHECK_FD_SET(so, NetworkEvents, closefds))
1165 CONTINUE(tcp);
1166 }
1167
1168 ret = soread(pData, so);
1169 /* Output it if we read something */
1170 if (RT_LIKELY(ret > 0))
1171 TCP_OUTPUT(pData, sototcpcb(so));
1172 }
1173
1174 /*
1175 * Check for FD_CLOSE events.
1176 * In some cases, once FD_CLOSE has been signalled on a socket, the event may only get flushed later (for various reasons).
1177 */
1178 if ( CHECK_FD_SET(so, NetworkEvents, closefds)
1179 || (so->so_close == 1))
1180 {
1181 /*
1182 * drain the socket
1183 */
1184 for (;;)
1185 {
1186 ret = soread(pData, so);
1187 if (ret > 0)
1188 TCP_OUTPUT(pData, sototcpcb(so));
1189 else
1190 {
1191 Log2(("%R[natsock] errno %d:%s\n", so, errno, strerror(errno)));
1192 break;
1193 }
1194 }
1195 /* mark the socket for termination _after_ it was drained */
1196 so->so_close = 1;
1197 CONTINUE(tcp);
1198 }
1199
1200 /*
1201 * Check sockets for writing
1202 */
1203 if (CHECK_FD_SET(so, NetworkEvents, writefds))
1204 {
1205 /*
1206 * Check for non-blocking, still-connecting sockets
1207 */
1208 if (so->so_state & SS_ISFCONNECTING)
1209 {
1210 Log2(("connecting %R[natsock] catched\n", so));
1211 /* Connected */
1212 so->so_state &= ~SS_ISFCONNECTING;
1213
1214 /*
1215 * This should be probably guarded by PROBE_CONN too. Anyway,
1216 * we disable it on OS/2 because the below send call returns
1217 * EFAULT which causes the opened TCP socket to close right
1218 * after it has been opened and connected.
1219 */
1220#ifndef RT_OS_OS2
1221 ret = send(so->s, (const char *)&ret, 0, 0);
1222 if (ret < 0)
1223 {
1224 /* XXXXX Must fix, zero bytes is a NOP */
1225 if ( errno == EAGAIN
1226 || errno == EWOULDBLOCK
1227 || errno == EINPROGRESS
1228 || errno == ENOTCONN)
1229 CONTINUE(tcp);
1230
1231 /* else failed */
1232 so->so_state = SS_NOFDREF;
1233 }
1234 /* else so->so_state &= ~SS_ISFCONNECTING; */
1235#endif
1236
1237 /*
1238 * Continue tcp_input
1239 */
1240 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1241 /* continue; */
1242 }
1243 else
1244 SOWRITE(ret, pData, so);
1245 /*
1246 * XXX If we wrote something (a lot), there could be the need
1247 * for a window update. In the worst case, the remote will send
1248 * a window probe to get things going again.
1249 */
1250 }
1251
1252 /*
1253 * Probe a still-connecting, non-blocking socket
1254 * to check if it's still alive
1255 */
1256#ifdef PROBE_CONN
1257 if (so->so_state & SS_ISFCONNECTING)
1258 {
1259 ret = recv(so->s, (char *)&ret, 0, 0);
1260
1261 if (ret < 0)
1262 {
1263 /* XXX */
1264 if ( errno == EAGAIN
1265 || errno == EWOULDBLOCK
1266 || errno == EINPROGRESS
1267 || errno == ENOTCONN)
1268 {
1269 CONTINUE(tcp); /* Still connecting, continue */
1270 }
1271
1272 /* else failed */
1273 so->so_state = SS_NOFDREF;
1274
1275 /* tcp_input will take care of it */
1276 }
1277 else
1278 {
1279 ret = send(so->s, &ret, 0, 0);
1280 if (ret < 0)
1281 {
1282 /* XXX */
1283 if ( errno == EAGAIN
1284 || errno == EWOULDBLOCK
1285 || errno == EINPROGRESS
1286 || errno == ENOTCONN)
1287 {
1288 CONTINUE(tcp);
1289 }
1290 /* else failed */
1291 so->so_state = SS_NOFDREF;
1292 }
1293 else
1294 so->so_state &= ~SS_ISFCONNECTING;
1295
1296 }
1297 TCP_INPUT((struct mbuf *)NULL, sizeof(struct ip),so);
1298 } /* SS_ISFCONNECTING */
1299#endif
1300 LOOP_LABEL(tcp, so, so_next);
1301 }
1302
1303 /*
1304 * Now UDP sockets.
1305 * Incoming packets are sent straight away, they're not buffered.
1306 * Incoming UDP data isn't buffered either.
1307 */
1308 QSOCKET_FOREACH(so, so_next, udp)
1309 /* { */
1310#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1311 if (pData->fmbuf_water_line == 1)
1312 {
1313 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
1314 {
1315 pData->fmbuf_water_line = 0;
1316 pData->fmbuf_water_warn_sent = 0;
1317 }
1318 goto done;
1319 }
1320#endif
1321#ifdef VBOX_WITH_SLIRP_MT
1322 if ( so->so_state & SS_NOFDREF
1323 && so->so_deleted == 1)
1324 {
1325 struct socket *son, *sop = NULL;
1326 QSOCKET_LOCK(udb);
1327 if (so->so_next != NULL)
1328 {
1329 if (so->so_next != &udb)
1330 SOCKET_LOCK(so->so_next);
1331 son = so->so_next;
1332 }
1333 if ( so->so_prev != &udb
1334 && so->so_prev != NULL)
1335 {
1336 SOCKET_LOCK(so->so_prev);
1337 sop = so->so_prev;
1338 }
1339 QSOCKET_UNLOCK(udb);
1340 remque(pData, so);
1341 NSOCK_DEC();
1342 SOCKET_UNLOCK(so);
1343 SOCKET_LOCK_DESTROY(so);
1344 RTMemFree(so);
1345 so_next = son;
1346 if (sop != NULL)
1347 SOCKET_UNLOCK(sop);
1348 CONTINUE_NO_UNLOCK(udp);
1349 }
1350#endif
1351 POLL_UDP_EVENTS(rc, error, so, &NetworkEvents);
1352
1353 LOG_NAT_SOCK(so, UDP, &NetworkEvents, readfds, writefds, xfds);
1354
1355 if (so->s != -1 && CHECK_FD_SET(so, NetworkEvents, readfds))
1356 {
1357 SORECVFROM(pData, so);
1358 }
1359 LOOP_LABEL(udp, so, so_next);
1360 }
1361
1362done:
1363#if 0
1364 /*
1365 * See if we can start outputting
1366 */
1367 if (if_queued && link_up)
1368 if_start(pData);
1369#endif
1370
1371 STAM_PROFILE_STOP(&pData->StatPoll, a);
1372}
1373
1374
1375struct arphdr
1376{
1377 unsigned short ar_hrd; /* format of hardware address */
1378 unsigned short ar_pro; /* format of protocol address */
1379 unsigned char ar_hln; /* length of hardware address */
1380 unsigned char ar_pln; /* length of protocol address */
1381 unsigned short ar_op; /* ARP opcode (command) */
1382
1383 /*
1384 * Ethernet looks like this : This bit is variable sized however...
1385 */
1386 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
1387 unsigned char ar_sip[4]; /* sender IP address */
1388 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
1389 unsigned char ar_tip[4]; /* target IP address */
1390};
1391AssertCompileSize(struct arphdr, 28);
1392
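/**
 * Handles an ARP frame from the guest: answers requests addressed to the
 * NAT's own addresses (alias, DNS, TFTP), learns guest addresses from
 * gratuitous ARPs and from replies, and updates the ARP cache accordingly.
 */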
1393static void arp_input(PNATState pData, struct mbuf *m)
1394{
1395 struct ethhdr *eh;
1396 struct ethhdr *reh;
1397 struct arphdr *ah;
1398 struct arphdr *rah;
1399 int ar_op;
1400 struct ex_list *ex_ptr;
1401 uint32_t htip;
1402 uint32_t tip;
1403 struct mbuf *mr;
1404 eh = mtod(m, struct ethhdr *);
1405 ah = (struct arphdr *)&eh[1];
1406 htip = RT_N2H_U32(*(uint32_t*)ah->ar_tip);
1407 tip = *(uint32_t*)ah->ar_tip;
1408
1409 ar_op = RT_N2H_U16(ah->ar_op);
1410
1411 switch (ar_op)
1412 {
1413 case ARPOP_REQUEST:
1414#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1415 mr = m_get(pData);
1416
1417 reh = mtod(mr, struct ethhdr *);
1418 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1419 Log4(("NAT: arp:%R[ether]->%R[ether]\n",
1420 reh->h_source, reh->h_dest));
1421 Log4(("NAT: arp: %R[IP4]\n", &tip));
1422
1423 mr->m_data += if_maxlinkhdr;
1424 mr->m_len = sizeof(struct arphdr);
1425 rah = mtod(mr, struct arphdr *);
1426#else
1427 mr = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1428 reh = mtod(mr, struct ethhdr *);
1429 mr->m_data += ETH_HLEN;
1430 rah = mtod(mr, struct arphdr *);
1431 mr->m_len = sizeof(struct arphdr);
1432 Assert(mr);
1433 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1434#endif
1435#ifdef VBOX_WITH_NAT_SERVICE
1436 if (tip == pData->special_addr.s_addr)
1437 goto arp_ok;
1438#endif
1439 if ((htip & pData->netmask) == RT_N2H_U32(pData->special_addr.s_addr))
1440 {
1441 if ( CTL_CHECK(htip, CTL_DNS)
1442 || CTL_CHECK(htip, CTL_ALIAS)
1443 || CTL_CHECK(htip, CTL_TFTP))
1444 goto arp_ok;
1445 for (ex_ptr = exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next)
1446 {
1447 if ((htip & ~pData->netmask) == ex_ptr->ex_addr)
1448 {
1449 goto arp_ok;
1450 }
1451 }
1452 m_free(pData, m);
1453 m_free(pData, mr);
1454 return;
1455
1456 arp_ok:
1457 rah->ar_hrd = RT_H2N_U16_C(1);
1458 rah->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1459 rah->ar_hln = ETH_ALEN;
1460 rah->ar_pln = 4;
1461 rah->ar_op = RT_H2N_U16_C(ARPOP_REPLY);
1462 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN);
1463
1464 switch (htip & ~pData->netmask)
1465 {
1466 case CTL_DNS:
1467 case CTL_ALIAS:
1468 rah->ar_sha[5] = (uint8_t)(htip & ~pData->netmask);
1469 break;
1470 default:;
1471 }
1472
1473 memcpy(rah->ar_sip, ah->ar_tip, 4);
1474 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
1475 memcpy(rah->ar_tip, ah->ar_sip, 4);
1476 if_encap(pData, ETH_P_ARP, mr, ETH_ENCAP_URG);
1477 m_free(pData, m);
1478 }
1479 /* Gratuitous ARP */
1480 if ( *(uint32_t *)ah->ar_sip == *(uint32_t *)ah->ar_tip
1481 && memcmp(ah->ar_tha, broadcast_ethaddr, ETH_ALEN) == 0
1482 && memcmp(eh->h_dest, broadcast_ethaddr, ETH_ALEN) == 0)
1483 {
1484 /* We've received an announcement about an address assignment,
1485 * so let's update the ARP cache.
1486 */
1487 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]) == 0)
1488 {
1489 m_free(pData, mr);
1490 m_free(pData, m);
1491 break;
1492 }
1493 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]);
1494 }
1495 break;
1496
1497 case ARPOP_REPLY:
1498 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]) == 0)
1499 {
1500 m_free(pData, m);
1501 break;
1502 }
1503 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_sip, ah->ar_sha);
1504 m_free(pData, m);
1505 break;
1506
1507 default:
1508 break;
1509 }
1510}
1511
1512/**
1513 * Feed a packet into the slirp engine.
1514 *
1515 * @param m Data buffer, m_len is not valid.
1516 * @param cbBuf The length of the data in m.
1517 */
1518void slirp_input(PNATState pData, struct mbuf *m, size_t cbBuf)
1519{
1520 int proto;
1521 static bool fWarnedIpv6;
1522 struct ethhdr *eh;
1523 uint8_t au8Ether[ETH_ALEN];
1524
1525 m->m_len = cbBuf;
1526 if (cbBuf < ETH_HLEN)
1527 {
1528 LogRel(("NAT: packet having size %d has been ignored\n", m->m_len));
1529 m_free(pData, m);
1530 return;
1531 }
1532 eh = mtod(m, struct ethhdr *);
1533 proto = RT_N2H_U16(eh->h_proto);
1534
1535 memcpy(au8Ether, eh->h_source, ETH_ALEN);
1536
1537 switch(proto)
1538 {
1539 case ETH_P_ARP:
1540 arp_input(pData, m);
1541 break;
1542
1543 case ETH_P_IP:
1544 /* Update time. Important if the network is very quiet, as otherwise
1545 * the first outgoing connection gets an incorrect timestamp. */
1546 updtime(pData);
1547 m_adj(m, ETH_HLEN);
1548#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1549 M_ASSERTPKTHDR(m);
1550 m->m_pkthdr.header = mtod(m, void *);
1551#else /* !VBOX_WITH_SLIRP_BSD_MBUF */
1552 if ( pData->fmbuf_water_line
1553 && pData->fmbuf_water_warn_sent == 0
1554 && (curtime - pData->tsmbuf_water_warn_sent) > 500)
1555 {
1556 icmp_error(pData, m, ICMP_SOURCEQUENCH, 0, 0, "Out of resources!!!");
1557 pData->fmbuf_water_warn_sent = 1;
1558 pData->tsmbuf_water_warn_sent = curtime;
1559 }
1560#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
1561 ip_input(pData, m);
1562 break;
1563
1564 case ETH_P_IPV6:
1565 m_free(pData, m);
1566 if (!fWarnedIpv6)
1567 {
1568 LogRel(("NAT: IPv6 not supported\n"));
1569 fWarnedIpv6 = true;
1570 }
1571 break;
1572
1573 default:
1574 Log(("NAT: Unsupported protocol %x\n", proto));
1575 m_free(pData, m);
1576 break;
1577 }
1578
1579 if (pData->cRedirectionsActive != pData->cRedirectionsStored)
1580 activate_port_forwarding(pData, au8Ether);
1581}
1582
1583/* output the IP packet to the ethernet device */
1584void if_encap(PNATState pData, uint16_t eth_proto, struct mbuf *m, int flags)
1585{
1586 struct ethhdr *eh;
1587 uint8_t *buf = NULL;
1588 size_t mlen = 0;
1589 STAM_PROFILE_START(&pData->StatIF_encap, a);
1590
1591#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1592 m->m_data -= if_maxlinkhdr;
1593 m->m_len += ETH_HLEN;
1594 eh = mtod(m, struct ethhdr *);
1595
1596 if (MBUF_HEAD(m) != m->m_data)
1597 {
1598 LogRel(("NAT: ethernet detects corruption of the packet"));
1599 AssertMsgFailed(("!!Ethernet frame corrupted!!"));
1600 }
1601#else
1602 M_ASSERTPKTHDR(m);
1603 m->m_data -= ETH_HLEN;
1604 m->m_len += ETH_HLEN;
1605 eh = mtod(m, struct ethhdr *);
1606#endif
1607
1608 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) != 0)
1609 {
1610 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
1611 memcpy(eh->h_source, special_ethaddr, ETH_ALEN);
1612 Assert(memcmp(eh->h_dest, special_ethaddr, ETH_ALEN) != 0);
1613 if (memcmp(eh->h_dest, zerro_ethaddr, ETH_ALEN) == 0)
1614 {
1615 /* don't do anything */
1616 m_free(pData, m);
1617 goto done;
1618 }
1619 }
1620#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1621 mlen = m->m_len;
1622#else
1623 mlen = m_length(m, NULL);
1624 buf = RTMemAlloc(mlen);
1625 if (buf == NULL)
1626 {
1627 LogRel(("NAT: Can't alloc memory for outgoing buffer\n"));
1628 m_free(pData, m);
1629 goto done;
1630 }
1631#endif
1632 eh->h_proto = RT_H2N_U16(eth_proto);
1633#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1634 m_copydata(m, 0, mlen, (char *)buf);
1635 if (flags & ETH_ENCAP_URG)
1636 slirp_urg_output(pData->pvUser, m, buf, mlen);
1637 else
1638 slirp_output(pData->pvUser, m, buf, mlen);
1639#else
1640 if (flags & ETH_ENCAP_URG)
1641 slirp_urg_output(pData->pvUser, m, mtod(m, const uint8_t *), mlen);
1642 else
1643 slirp_output(pData->pvUser, m, mtod(m, const uint8_t *), mlen);
1644#endif
1645done:
1646 STAM_PROFILE_STOP(&pData->StatIF_encap, a);
1647}
1648
1649/**
1650 * We still use the DHCP server leases to map an Ethernet address to an IP address.
1651 * @todo see rt_lookup_in_cache
1652 */
1653static uint32_t find_guest_ip(PNATState pData, const uint8_t *eth_addr)
1654{
1655 uint32_t ip = INADDR_ANY;
1656 int rc;
1657
1658 if (eth_addr == NULL)
1659 return INADDR_ANY;
1660
1661 if ( memcmp(eth_addr, zerro_ethaddr, ETH_ALEN) == 0
1662 || memcmp(eth_addr, broadcast_ethaddr, ETH_ALEN) == 0)
1663 return INADDR_ANY;
1664
1665 rc = slirp_arp_lookup_ip_by_ether(pData, eth_addr, &ip);
1666 if (RT_SUCCESS(rc))
1667 return ip;
1668
1669 bootp_cache_lookup_ip_by_ether(pData, eth_addr, &ip);
1670 /* ignore return code, ip will be set to INADDR_ANY on error */
1671 return ip;
1672}
1673
1674/**
1675 * We need to check whether port forwarding has already been activated
1676 * for a specific machine ... that of course only matters in
1677 * service mode.
1678 * @todo finish this for service case
1679 */
1680static void activate_port_forwarding(PNATState pData, const uint8_t *h_source)
1681{
1682 struct port_forward_rule *rule;
1683
1684 /* check mac here */
1685 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
1686 {
1687 struct socket *so;
1688 struct alias_link *alias_link;
1689 struct libalias *lib;
1690 int flags;
1691 struct sockaddr sa;
1692 struct sockaddr_in *psin;
1693 socklen_t socketlen;
1694 struct in_addr alias;
1695 int rc;
1696 uint32_t guest_addr; /* need to understand if we already give address to guest */
1697
1698 if (rule->activated)
1699 continue;
1700
1701#ifdef VBOX_WITH_NAT_SERVICE
1702 if (memcmp(rule->mac_address, h_source, ETH_ALEN) != 0)
1703 continue; /* not the right MAC; @todo: it'd be better to keep the port-forwarding list per MAC */
1704 guest_addr = find_guest_ip(pData, h_source);
1705#else
1706#if 0
1707 if (memcmp(client_ethaddr, h_source, ETH_ALEN) != 0)
1708 continue;
1709#endif
1710 guest_addr = find_guest_ip(pData, h_source);
1711#endif
1712 if (guest_addr == INADDR_ANY)
1713 {
1714 /* the address wasn't granted */
1715 return;
1716 }
1717
1718#if !defined(VBOX_WITH_NAT_SERVICE)
1719 if (rule->guest_addr.s_addr != guest_addr)
1720 continue;
1721#endif
1722
1723 LogRel(("NAT: set redirect %s host port %d => guest port %d @ %R[IP4]\n",
1724 (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1725 rule->host_port, rule->guest_port, &guest_addr));
1726
1727 if (rule->proto == IPPROTO_UDP)
1728 so = udp_listen(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1729 RT_H2N_U16(rule->guest_port), 0);
1730 else
1731 so = solisten(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1732 RT_H2N_U16(rule->guest_port), 0);
1733
1734 if (so == NULL)
1735 goto remove_port_forwarding;
1736
1737 psin = (struct sockaddr_in *)&sa;
1738 psin->sin_family = AF_INET;
1739 psin->sin_port = 0;
1740 psin->sin_addr.s_addr = INADDR_ANY;
1741 socketlen = sizeof(struct sockaddr);
1742
1743 rc = getsockname(so->s, &sa, &socketlen);
1744 if (rc < 0 || sa.sa_family != AF_INET)
1745 goto remove_port_forwarding;
1746
1747 psin = (struct sockaddr_in *)&sa;
1748
1749 lib = LibAliasInit(pData, NULL);
1750 flags = LibAliasSetMode(lib, 0, 0);
1751 flags |= PKT_ALIAS_LOG; /* set logging */
1752 flags |= PKT_ALIAS_PROXY_ONLY; /* do transparent proxying */
1753 flags |= PKT_ALIAS_REVERSE; /* reverse mode for the incoming redirection */
1754 flags = LibAliasSetMode(lib, flags, ~0);
1755
1756 alias.s_addr = RT_H2N_U32(RT_N2H_U32(guest_addr) | CTL_ALIAS);
1757 alias_link = LibAliasRedirectPort(lib, psin->sin_addr, RT_H2N_U16(rule->host_port),
1758 alias, RT_H2N_U16(rule->guest_port),
1759 pData->special_addr, -1, /* not very clear for now */
1760 rule->proto);
1761 if (!alias_link)
1762 goto remove_port_forwarding;
1763
1764 so->so_la = lib;
1765 rule->activated = 1;
1766 pData->cRedirectionsActive++;
1767 continue;
1768
1769 remove_port_forwarding:
1770 LogRel(("NAT: failed to redirect %s %d => %d\n",
1771 (rule->proto == IPPROTO_UDP?"UDP":"TCP"), rule->host_port, rule->guest_port));
1772 LIST_REMOVE(rule, list);
1773 pData->cRedirectionsStored--;
1774 RTMemFree(rule);
1775 }
1776}
1777
1778/**
1779 * Changed in 3.1: instead of opening a new socket right away, we gather
1780 * more information first:
1781 * 1. bind IP
1782 * 2. host port
1783 * 3. guest port
1784 * 4. protocol
1785 * 5. guest MAC address
1786 * The guest's MAC address is mostly important for the NAT service, but we
1787 * could easily get it from the VM configuration in DrvNAT or the service;
1788 * the idea is to activate the corresponding port forwarding later.
1789 */
1790int slirp_redir(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1791 struct in_addr guest_addr, int guest_port, const uint8_t *ethaddr)
1792{
1793 struct port_forward_rule *rule = NULL;
1794 Assert(memcmp(ethaddr, zerro_ethaddr, ETH_ALEN) == 0);
1795
1796 rule = RTMemAllocZ(sizeof(struct port_forward_rule));
1797 if (rule == NULL)
1798 return 1;
1799
1800 rule->proto = (is_udp ? IPPROTO_UDP : IPPROTO_TCP);
1801 rule->host_port = host_port;
1802 rule->guest_port = guest_port;
1803#ifndef VBOX_WITH_NAT_SERVICE
1804 rule->guest_addr.s_addr = guest_addr.s_addr;
1805#endif
1806 rule->bind_ip.s_addr = host_addr.s_addr;
1807 memcpy(rule->mac_address, ethaddr, ETH_ALEN);
1808 /* @todo add mac address */
1809 LIST_INSERT_HEAD(&pData->port_forward_rule_head, rule, list);
1810 pData->cRedirectionsStored++;
1811 return 0;
1812}
1813
1814int slirp_add_exec(PNATState pData, int do_pty, const char *args, int addr_low_byte,
1815 int guest_port)
1816{
1817 return add_exec(&exec_list, do_pty, (char *)args,
1818 addr_low_byte, RT_H2N_U16(guest_port));
1819}
1820
1821void slirp_set_ethaddr_and_activate_port_forwarding(PNATState pData, const uint8_t *ethaddr, uint32_t GuestIP)
1822{
1823#ifndef VBOX_WITH_NAT_SERVICE
1824 memcpy(client_ethaddr, ethaddr, ETH_ALEN);
1825#endif
1826 if (GuestIP != INADDR_ANY)
1827 {
1828 slirp_arp_cache_update_or_add(pData, GuestIP, ethaddr);
1829 activate_port_forwarding(pData, ethaddr);
1830 }
1831}
1832
1833#if defined(RT_OS_WINDOWS)
1834HANDLE *slirp_get_events(PNATState pData)
1835{
1836 return pData->phEvents;
1837}
1838void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index)
1839{
1840 pData->phEvents[index] = hEvent;
1841}
1842#endif
1843
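/**
 * Returns the poll timeout in milliseconds: 2 ms while a TCP fast timer is
 * pending, 500 ms while slow timers are needed, otherwise one hour.
 */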
1844unsigned int slirp_get_timeout_ms(PNATState pData)
1845{
1846 if (link_up)
1847 {
1848 if (time_fasttimo)
1849 return 2;
1850 if (do_slowtimo)
1851 return 500; /* see PR_SLOWHZ */
1852 }
1853 return 3600*1000; /* one hour */
1854}
1855
1856#ifndef RT_OS_WINDOWS
1857int slirp_get_nsock(PNATState pData)
1858{
1859 return pData->nsock;
1860}
1861#endif
1862
1863/*
1864 * This function is called from the NAT thread.
1865 */
1866void slirp_post_sent(PNATState pData, void *pvArg)
1867{
1868 struct socket *so = 0;
1869 struct tcpcb *tp = 0;
1870 struct mbuf *m = (struct mbuf *)pvArg;
1871 m_free(pData, m);
1872}
1873#ifdef VBOX_WITH_SLIRP_MT
1874void slirp_process_queue(PNATState pData)
1875{
1876 RTReqProcess(pData->pReqQueue, RT_INDEFINITE_WAIT);
1877}
1878void *slirp_get_queue(PNATState pData)
1879{
1880 return pData->pReqQueue;
1881}
1882#endif
1883
1884void slirp_set_dhcp_TFTP_prefix(PNATState pData, const char *tftpPrefix)
1885{
1886 Log2(("tftp_prefix:%s\n", tftpPrefix));
1887 tftp_prefix = tftpPrefix;
1888}
1889
1890void slirp_set_dhcp_TFTP_bootfile(PNATState pData, const char *bootFile)
1891{
1892 Log2(("bootFile:%s\n", bootFile));
1893 bootp_filename = bootFile;
1894}
1895
1896void slirp_set_dhcp_next_server(PNATState pData, const char *next_server)
1897{
1898 Log2(("next_server:%s\n", next_server));
1899 if (next_server == NULL)
1900 pData->tftp_server.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_TFTP);
1901 else
1902 inet_aton(next_server, &pData->tftp_server);
1903}
1904
1905int slirp_set_binding_address(PNATState pData, char *addr)
1906{
1907 if (addr == NULL || (inet_aton(addr, &pData->bindIP) == 0))
1908 {
1909 pData->bindIP.s_addr = INADDR_ANY;
1910 return 1;
1911 }
1912 return 0;
1913}
1914
1915void slirp_set_dhcp_dns_proxy(PNATState pData, bool fDNSProxy)
1916{
1917 if (!pData->use_host_resolver)
1918 {
1919 Log2(("NAT: DNS proxy switched %s\n", (fDNSProxy ? "on" : "off")));
1920 pData->use_dns_proxy = fDNSProxy;
1921 }
1922 else
1923 LogRel(("NAT: Host Resolver conflicts with DNS proxy, the last one was forcely ignored\n"));
1924}
1925
1926#define CHECK_ARG(name, val, lim_min, lim_max) \
1927 do { \
1928 if ((val) < (lim_min) || (val) > (lim_max)) \
1929 { \
1930 LogRel(("NAT: (" #name ":%d) has been ignored, " \
1931 "because out of range (%d, %d)\n", (val), (lim_min), (lim_max))); \
1932 return; \
1933 } \
1934 else \
1935 LogRel(("NAT: (" #name ":%d)\n", (val))); \
1936 } while (0)
1937
1938/* Don't allow the user to set values below 8 KiB or above 1 MiB. */
1939#define _8K_1M_CHECK_ARG(name, val) CHECK_ARG(name, (val), 8, 1024)
1940void slirp_set_rcvbuf(PNATState pData, int kilobytes)
1941{
1942 _8K_1M_CHECK_ARG("SOCKET_RCVBUF", kilobytes);
1943 pData->socket_rcv = kilobytes;
1944}
1945void slirp_set_sndbuf(PNATState pData, int kilobytes)
1946{
1947 _8K_1M_CHECK_ARG("SOCKET_SNDBUF", kilobytes);
1948 pData->socket_snd = kilobytes * _1K;
1949}
1950void slirp_set_tcp_rcvspace(PNATState pData, int kilobytes)
1951{
1952 _8K_1M_CHECK_ARG("TCP_RCVSPACE", kilobytes);
1953 tcp_rcvspace = kilobytes * _1K;
1954}
1955void slirp_set_tcp_sndspace(PNATState pData, int kilobytes)
1956{
1957 _8K_1M_CHECK_ARG("TCP_SNDSPACE", kilobytes);
1958 tcp_sndspace = kilobytes * _1K;
1959}
1960
1961/*
1962 * Looks up the Ethernet address for the given IP in the ARP cache.
1963 * Note: it is the caller's responsibility to allocate the buffer for the result.
1964 * @returns iprt status code
1965 */
1966int slirp_arp_lookup_ether_by_ip(PNATState pData, uint32_t ip, uint8_t *ether)
1967{
1968 struct arp_cache_entry *ac;
1969
1970 if (ether == NULL)
1971 return VERR_INVALID_PARAMETER;
1972
1973 if (LIST_EMPTY(&pData->arp_cache))
1974 return VERR_NOT_FOUND;
1975
1976 LIST_FOREACH(ac, &pData->arp_cache, list)
1977 {
1978 if (ac->ip == ip)
1979 {
1980 memcpy(ether, ac->ether, ETH_ALEN);
1981 return VINF_SUCCESS;
1982 }
1983 }
1984 return VERR_NOT_FOUND;
1985}
1986
1987/*
1988 * Looks up the IP address for the given Ethernet address in the ARP cache.
1989 * Note: it is the caller's responsibility to allocate the buffer for the result.
1990 * @returns VINF_SUCCESS if found, VERR_NOT_FOUND otherwise
1991 */
1992int slirp_arp_lookup_ip_by_ether(PNATState pData, const uint8_t *ether, uint32_t *ip)
1993{
1994 struct arp_cache_entry *ac;
1995 *ip = INADDR_ANY;
1996
1997 if (LIST_EMPTY(&pData->arp_cache))
1998 return VERR_NOT_FOUND;
1999
2000 LIST_FOREACH(ac, &pData->arp_cache, list)
2001 {
2002 if (memcmp(ether, ac->ether, ETH_ALEN) == 0)
2003 {
2004 *ip = ac->ip;
2005 return VINF_SUCCESS;
2006 }
2007 }
2008 return VERR_NOT_FOUND;
2009}
2010
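/** Broadcasts an ARP request ("who has <dst>?") on behalf of the NAT engine. */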
2011void slirp_arp_who_has(PNATState pData, uint32_t dst)
2012{
2013 struct mbuf *m;
2014 struct ethhdr *ehdr;
2015 struct arphdr *ahdr;
2016
2017#ifndef VBOX_WITH_SLIRP_BSD_MBUF
2018 m = m_get(pData);
2019#else
2020 m = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
2021#endif
2022 if (m == NULL)
2023 {
2024 LogRel(("NAT: Can't alloc mbuf for ARP request\n"));
2025 return;
2026 }
2027 ehdr = mtod(m, struct ethhdr *);
2028 memset(ehdr->h_source, 0xff, ETH_ALEN);
2029 ahdr = (struct arphdr *)&ehdr[1];
2030 ahdr->ar_hrd = RT_H2N_U16_C(1);
2031 ahdr->ar_pro = RT_H2N_U16_C(ETH_P_IP);
2032 ahdr->ar_hln = ETH_ALEN;
2033 ahdr->ar_pln = 4;
2034 ahdr->ar_op = RT_H2N_U16_C(ARPOP_REQUEST);
2035 memcpy(ahdr->ar_sha, special_ethaddr, ETH_ALEN);
2036 *(uint32_t *)ahdr->ar_sip = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
2037 memset(ahdr->ar_tha, 0xff, ETH_ALEN); /*broadcast*/
2038 *(uint32_t *)ahdr->ar_tip = dst;
2039#ifndef VBOX_WITH_SLIRP_BSD_MBUF
2040 m->m_data += if_maxlinkhdr;
2041 m->m_len = sizeof(struct arphdr);
2042#else
2043 /* warning: this must fit within the minimal mbuf size */
2044 m->m_len = sizeof(struct arphdr) + ETH_HLEN;
2045 m->m_data += ETH_HLEN;
2046 m->m_len -= ETH_HLEN;
2047#endif
2048 if_encap(pData, ETH_P_ARP, m, ETH_ENCAP_URG);
2049}
2050
2051int slirp_arp_cache_update_or_add(PNATState pData, uint32_t dst, const uint8_t *mac)
2052{
2053 if (slirp_arp_cache_update(pData, dst, mac))
2054 slirp_arp_cache_add(pData, dst, mac);
2055
2056 return 0;
2057}
2058
2059/* Updates the ARP cache.
2060 * @returns 0 if the entry was found and updated,
2061 * 1 if it wasn't found.
2062 */
2063int slirp_arp_cache_update(PNATState pData, uint32_t dst, const uint8_t *mac)
2064{
2065 struct arp_cache_entry *ac;
2066 LIST_FOREACH(ac, &pData->arp_cache, list)
2067 {
2068 if (memcmp(ac->ether, mac, ETH_ALEN) == 0)
2069 {
2070 ac->ip = dst;
2071 return 0;
2072 }
2073 }
2074 return 1;
2075}
2076
2077void slirp_arp_cache_add(PNATState pData, uint32_t ip, const uint8_t *ether)
2078{
2079 struct arp_cache_entry *ac = NULL;
2080 ac = RTMemAllocZ(sizeof(struct arp_cache_entry));
2081 if (ac == NULL)
2082 {
2083 LogRel(("NAT: Can't allocate arp cache entry\n"));
2084 return;
2085 }
2086 ac->ip = ip;
2087 memcpy(ac->ether, ether, ETH_ALEN);
2088 LIST_INSERT_HEAD(&pData->arp_cache, ac, list);
2089}
2090
2091#ifdef VBOX_WITH_SLIRP_BSD_MBUF
2092void slirp_set_mtu(PNATState pData, int mtu)
2093{
2094 if (mtu < 20 || mtu >= 16000)
2095 {
2096 LogRel(("NAT: mtu(%d) is out of range (20;16000] mtu forcely assigned to 1500\n", mtu));
2097 mtu = 1500;
2098 }
2099 if_mtu =
2100 if_mru = mtu;
2101}
2102#endif