VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/slirp.c@ 28320

Last change on this file since 28320 was 28173, checked in by vboxsync, 15 years ago

NAT: warnings.

  • Property svn:eol-style set to native
File size: 63.5 KB
1#include "slirp.h"
2#ifdef RT_OS_OS2
3# include <paths.h>
4#endif
5
6#include <VBox/err.h>
7#include <VBox/pdmdrv.h>
8#include <iprt/assert.h>
9#include <iprt/file.h>
10#ifndef RT_OS_WINDOWS
11# include <sys/ioctl.h>
12# include <poll.h>
13#else
14# include <Winnls.h>
15# define _WINSOCK2API_
16# include <IPHlpApi.h>
17#endif
18#include <alias.h>
19
20#ifndef RT_OS_WINDOWS
21
22# define DO_ENGAGE_EVENT1(so, fdset, label) \
23 do { \
24 if ( so->so_poll_index != -1 \
25 && so->s == polls[so->so_poll_index].fd) \
26 { \
27 polls[so->so_poll_index].events |= N_(fdset ## _poll); \
28 break; \
29 } \
30 AssertRelease(poll_index < (nfds)); \
31 AssertRelease(poll_index >= 0 && poll_index < (nfds)); \
32 polls[poll_index].fd = (so)->s; \
33 (so)->so_poll_index = poll_index; \
34 polls[poll_index].events = N_(fdset ## _poll); \
35 polls[poll_index].revents = 0; \
36 poll_index++; \
37 } while (0)
38
39# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
40 do { \
41 if ( so->so_poll_index != -1 \
42 && so->s == polls[so->so_poll_index].fd) \
43 { \
44 polls[so->so_poll_index].events |= \
45 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
46 break; \
47 } \
48 AssertRelease(poll_index < (nfds)); \
49 polls[poll_index].fd = (so)->s; \
50 (so)->so_poll_index = poll_index; \
51 polls[poll_index].events = \
52 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
53 poll_index++; \
54 } while (0)
55
56# define DO_POLL_EVENTS(rc, error, so, events, label) do {} while (0)
57
58/*
 59 * DO_CHECK_FD_SET is used when dumping events on a socket, including POLLNVAL.
 60 * gcc warns about attempts to log POLLNVAL, so the construction in the last two
 61 * lines catches POLLNVAL while logging and returns false on error during
 62 * normal usage.
 63 */
64# define DO_CHECK_FD_SET(so, events, fdset) \
65 ( ((so)->so_poll_index != -1) \
66 && ((so)->so_poll_index <= ndfs) \
67 && ((so)->s == polls[so->so_poll_index].fd) \
68 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)) \
69 && ( N_(fdset ## _poll) == POLLNVAL \
70 || !(polls[(so)->so_poll_index].revents & POLLNVAL)))
71
72 /* specific for Unix API */
73# define DO_UNIX_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
74 /* specific for Windows Winsock API */
75# define DO_WIN_CHECK_FD_SET(so, events, fdset) 0
76
77# ifndef RT_OS_LINUX
78# define readfds_poll (POLLRDNORM)
79# define writefds_poll (POLLWRNORM)
80# else
81# define readfds_poll (POLLIN)
82# define writefds_poll (POLLOUT)
83# endif
84# define xfds_poll (POLLPRI)
85# define closefds_poll (POLLHUP)
86# define rderr_poll (POLLERR)
87# define rdhup_poll (POLLHUP)
88# define nval_poll (POLLNVAL)
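
/*
 * A minimal sketch of how the poll()-path macros above are meant to be used
 * together; the 'so', 'polls', 'poll_index' and 'ignored' names simply mirror
 * the parameters used later in this file, and the surrounding loop is an
 * assumption of this sketch, not code that is compiled here.
 */
#if 0 /* illustrative sketch only */
    int poll_index = 0;

    /* fill phase: register interest in readability for socket 'so' */
    TCP_ENGAGE_EVENT1(so, readfds);                 /* polls[].events |= readfds_poll */

    /* wait phase: the caller (the NAT thread) blocks in poll() */
    int cReady = poll(polls, poll_index, slirp_get_timeout_ms(pData));

    /* poll phase: test revents through the same abstraction, guarding against POLLNVAL */
    if (cReady > 0 && CHECK_FD_SET(so, ignored, readfds))
        soread(pData, so);
#endif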
89
90# define ICMP_ENGAGE_EVENT(so, fdset) \
91 do { \
92 if (pData->icmp_socket.s != -1) \
93 DO_ENGAGE_EVENT1((so), fdset, ICMP); \
94 } while (0)
95
96#else /* RT_OS_WINDOWS */
97
98/*
99 * On Windows, we will be notified by IcmpSendEcho2() when the response arrives.
 100 * So no call to WSAEventSelect() is necessary.
101 */
102# define ICMP_ENGAGE_EVENT(so, fdset) do {} while (0)
103
104/*
105 * On Windows we use FD_ALL_EVENTS to ensure that we don't miss any event.
106 */
107# define DO_ENGAGE_EVENT1(so, fdset1, label) \
108 do { \
109 rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS); \
110 if (rc == SOCKET_ERROR) \
111 { \
112 /* This should not happen */ \
113 error = WSAGetLastError(); \
114 LogRel(("WSAEventSelect (" #label ") error %d (so=%x, socket=%s, event=%x)\n", \
115 error, (so), (so)->s, VBOX_SOCKET_EVENT)); \
116 } \
117 } while (0); \
118 CONTINUE(label)
119
120# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
121 DO_ENGAGE_EVENT1((so), (fdset1), label)
122
123# define DO_POLL_EVENTS(rc, error, so, events, label) \
124 (rc) = WSAEnumNetworkEvents((so)->s, VBOX_SOCKET_EVENT, (events)); \
125 if ((rc) == SOCKET_ERROR) \
126 { \
127 (error) = WSAGetLastError(); \
128 LogRel(("WSAEnumNetworkEvents " #label " error %d\n", (error))); \
129 CONTINUE(label); \
130 }
131
132# define acceptds_win FD_ACCEPT
133# define acceptds_win_bit FD_ACCEPT_BIT
134# define readfds_win FD_READ
135# define readfds_win_bit FD_READ_BIT
136# define writefds_win FD_WRITE
137# define writefds_win_bit FD_WRITE_BIT
138# define xfds_win FD_OOB
139# define xfds_win_bit FD_OOB_BIT
140# define closefds_win FD_CLOSE
141# define closefds_win_bit FD_CLOSE_BIT
145
146# define DO_CHECK_FD_SET(so, events, fdset) \
147 (((events).lNetworkEvents & fdset ## _win) && ((events).iErrorCode[fdset ## _win_bit] == 0))
148
149# define DO_WIN_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
150# define DO_UNIX_CHECK_FD_SET(so, events, fdset) 1 /*specific for Unix API */
151
152#endif /* RT_OS_WINDOWS */
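
/*
 * The Windows counterpart of the poll() sketch above: every engaged socket is
 * tied to the single VBOX_SOCKET_EVENT object with FD_ALL_EVENTS, the NAT
 * thread waits on pData->phEvents[] (e.g. with WSAWaitForMultipleEvents), and
 * per-socket results are then recovered with WSAEnumNetworkEvents.
 * Illustrative only; 'so', 'rc' and 'error' are assumed to exist as they do in
 * slirp_select_poll() below.
 */
#if 0 /* illustrative sketch only */
    /* fill phase: tie the socket to the shared event object */
    rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS);

    /* poll phase, after the wait has returned */
    WSANETWORKEVENTS NetworkEvents;
    POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);   /* WSAEnumNetworkEvents() */
    if (CHECK_FD_SET(so, NetworkEvents, readfds))     /* FD_READ fired and its iErrorCode is 0 */
        soread(pData, so);
#endif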
153
154#define TCP_ENGAGE_EVENT1(so, fdset) \
155 DO_ENGAGE_EVENT1((so), fdset, tcp)
156
157#define TCP_ENGAGE_EVENT2(so, fdset1, fdset2) \
158 DO_ENGAGE_EVENT2((so), fdset1, fdset2, tcp)
159
160#define UDP_ENGAGE_EVENT(so, fdset) \
161 DO_ENGAGE_EVENT1((so), fdset, udp)
162
163#define POLL_TCP_EVENTS(rc, error, so, events) \
164 DO_POLL_EVENTS((rc), (error), (so), (events), tcp)
165
166#define POLL_UDP_EVENTS(rc, error, so, events) \
167 DO_POLL_EVENTS((rc), (error), (so), (events), udp)
168
169#define CHECK_FD_SET(so, events, set) \
170 (DO_CHECK_FD_SET((so), (events), set))
171
172#define WIN_CHECK_FD_SET(so, events, set) \
173 (DO_WIN_CHECK_FD_SET((so), (events), set))
174
175#define UNIX_CHECK_FD_SET(so, events, set) \
176 (DO_UNIX_CHECK_FD_SET(so, events, set))
177
178/*
 179 * Logging macros
180 */
181#if VBOX_WITH_DEBUG_NAT_SOCKETS
182# if defined(RT_OS_WINDOWS)
183# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
184 do { \
185 LogRel((" " #proto " %R[natsock] %R[natwinnetevents]\n", (so), (winevent))); \
186 } while (0)
187# else /* !RT_OS_WINDOWS */
188# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
189 do { \
190 LogRel((" " #proto " %R[natsock] %s %s %s er: %s, %s, %s\n", (so), \
191 CHECK_FD_SET(so, ign ,r_fdset) ? "READ":"", \
192 CHECK_FD_SET(so, ign, w_fdset) ? "WRITE":"", \
193 CHECK_FD_SET(so, ign, x_fdset) ? "OOB":"", \
194 CHECK_FD_SET(so, ign, rderr) ? "RDERR":"", \
195 CHECK_FD_SET(so, ign, rdhup) ? "RDHUP":"", \
196 CHECK_FD_SET(so, ign, nval) ? "RDNVAL":"")); \
197 } while (0)
198# endif /* !RT_OS_WINDOWS */
199#else /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
200# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) do {} while (0)
201#endif /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
202
203#define LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
204 DO_LOG_NAT_SOCK((so), proto, (winevent), r_fdset, w_fdset, x_fdset)
205
206static void activate_port_forwarding(PNATState, const uint8_t *pEther);
207
208static const uint8_t special_ethaddr[6] =
209{
210 0x52, 0x54, 0x00, 0x12, 0x35, 0x00
211};
212
213static const uint8_t broadcast_ethaddr[6] =
214{
215 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
216};
217
218const uint8_t zerro_ethaddr[6] =
219{
220 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
221};
222
223#ifdef RT_OS_WINDOWS
224static int get_dns_addr_domain(PNATState pData, bool fVerbose,
225 struct in_addr *pdns_addr,
226 const char **ppszDomain)
227{
228 ULONG flags = GAA_FLAG_INCLUDE_PREFIX; /*GAA_FLAG_INCLUDE_ALL_INTERFACES;*/ /* all interfaces registered in NDIS */
229 PIP_ADAPTER_ADDRESSES pAdapterAddr = NULL;
230 PIP_ADAPTER_ADDRESSES pAddr = NULL;
231 PIP_ADAPTER_DNS_SERVER_ADDRESS pDnsAddr = NULL;
232 ULONG size;
233 int wlen = 0;
234 char *pszSuffix;
235 struct dns_domain_entry *pDomain = NULL;
236 ULONG ret = ERROR_SUCCESS;
237
 238    /* @todo add GAA_FLAG_SKIP_* flags so that only the required information is fetched */
239
240 /* determine size of buffer */
241 size = 0;
242 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
243 if (ret != ERROR_BUFFER_OVERFLOW)
244 {
 245        LogRel(("NAT: error %lu occurred while querying the required buffer size\n", ret));
246 return -1;
247 }
248 if (size == 0)
249 {
 250        LogRel(("NAT: adapter address query returned a zero buffer size\n"));
251 return -1;
252 }
253
254 pAdapterAddr = RTMemAllocZ(size);
255 if (!pAdapterAddr)
256 {
257 LogRel(("NAT: No memory available \n"));
258 return -1;
259 }
260 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
261 if (ret != ERROR_SUCCESS)
262 {
 263        LogRel(("NAT: error %lu occurred while fetching adapter info\n", ret));
264 RTMemFree(pAdapterAddr);
265 return -1;
266 }
267
268 for (pAddr = pAdapterAddr; pAddr != NULL; pAddr = pAddr->Next)
269 {
270 int found;
271 if (pAddr->OperStatus != IfOperStatusUp)
272 continue;
273
274 for (pDnsAddr = pAddr->FirstDnsServerAddress; pDnsAddr != NULL; pDnsAddr = pDnsAddr->Next)
275 {
276 struct sockaddr *SockAddr = pDnsAddr->Address.lpSockaddr;
277 struct in_addr InAddr;
278 struct dns_entry *pDns;
279
280 if (SockAddr->sa_family != AF_INET)
281 continue;
282
283 InAddr = ((struct sockaddr_in *)SockAddr)->sin_addr;
284
285 /* add dns server to list */
286 pDns = RTMemAllocZ(sizeof(struct dns_entry));
287 if (!pDns)
288 {
289 LogRel(("NAT: Can't allocate buffer for DNS entry\n"));
290 RTMemFree(pAdapterAddr);
291 return VERR_NO_MEMORY;
292 }
293
294 LogRel(("NAT: adding %R[IP4] to DNS server list\n", &InAddr));
295 if ((InAddr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
296 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
297 else
298 pDns->de_addr.s_addr = InAddr.s_addr;
299
300 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
301
302 if (pAddr->DnsSuffix == NULL)
303 continue;
304
 305            /* only add unique domain suffixes */
306 RTUtf16ToUtf8(pAddr->DnsSuffix, &pszSuffix);
307 if (!pszSuffix || strlen(pszSuffix) == 0)
308 {
309 RTStrFree(pszSuffix);
310 continue;
311 }
312
313 found = 0;
314 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
315 {
316 if ( pDomain->dd_pszDomain != NULL
317 && strcmp(pDomain->dd_pszDomain, pszSuffix) == 0)
318 {
319 found = 1;
320 RTStrFree(pszSuffix);
321 break;
322 }
323 }
324 if (!found)
325 {
326 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
327 if (!pDomain)
328 {
329 LogRel(("NAT: not enough memory\n"));
330 RTStrFree(pszSuffix);
331 RTMemFree(pAdapterAddr);
332 return VERR_NO_MEMORY;
333 }
334 pDomain->dd_pszDomain = pszSuffix;
335 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
336 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
337 }
338 }
339 }
340 RTMemFree(pAdapterAddr);
341 return 0;
342}
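
/*
 * A worked example of the loopback remapping above, assuming the traditional
 * default guest network 10.0.2.0/24 (special_addr = 10.0.2.0) and CTL_ALIAS = 2;
 * the concrete numbers are assumptions of this example only.
 */
#if 0 /* worked example only */
    /* the host reports 127.0.0.1 as its DNS server, which the guest cannot reach */
    InAddr.s_addr = RT_H2N_U32_C(INADDR_LOOPBACK);
    /* the loopback test above matches, so the entry is rewritten to the NAT alias: */
    pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
    /* 10.0.2.0 | 0.0.0.2 == 10.0.2.2, i.e. the address the NAT engine itself answers on */
#endif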
343
344#else /* !RT_OS_WINDOWS */
345
346static int RTFileGets(RTFILE File, void *pvBuf, size_t cbBufSize, size_t *pcbRead)
347{
348 size_t cbRead;
349 char bTest;
350 int rc = VERR_NO_MEMORY;
351 char *pu8Buf = (char *)pvBuf;
352 *pcbRead = 0;
353
354 while ( RT_SUCCESS(rc = RTFileRead(File, &bTest, 1, &cbRead))
355 && (pu8Buf - (char *)pvBuf) < cbBufSize)
356 {
357 if (cbRead == 0)
358 return VERR_EOF;
359
360 if (bTest == '\r' || bTest == '\n')
361 {
362 *pu8Buf = 0;
363 return VINF_SUCCESS;
364 }
365 *pu8Buf = bTest;
366 pu8Buf++;
367 (*pcbRead)++;
368 }
369 return rc;
370}
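
/*
 * A minimal usage sketch for RTFileGets() above; the path and the logging are
 * assumptions of this sketch and it is not compiled as part of this file.
 */
#if 0 /* illustrative sketch only */
    RTFILE hFile;
    char   szLine[512];
    size_t cchLine;
    int    vrc = RTFileOpen(&hFile, "/etc/resolv.conf",
                            RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
    if (RT_SUCCESS(vrc))
    {
        /* VINF_SUCCESS per line, VERR_EOF once the file is exhausted */
        while (RT_SUCCESS(vrc = RTFileGets(hFile, szLine, sizeof(szLine), &cchLine)))
            Log(("line (%u chars): %s\n", (unsigned)cchLine, szLine));
        RTFileClose(hFile);
    }
#endif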
371
372static int get_dns_addr_domain(PNATState pData, bool fVerbose,
373 struct in_addr *pdns_addr,
374 const char **ppszDomain)
375{
376 char buff[512];
377 char buff2[256];
378 RTFILE f;
379 int fFoundNameserver = 0;
380 struct in_addr tmp_addr;
 381    int rc = VERR_FILE_NOT_FOUND; /* default for the OS/2 path below when ETC isn't set */
382 size_t bytes;
383
384# ifdef RT_OS_OS2
385 /* Try various locations. */
386 char *etc = getenv("ETC");
387 if (etc)
388 {
 389        RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", etc);
390 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
391 }
392 if (RT_FAILURE(rc))
393 {
 394        RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", _PATH_ETC);
395 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
396 }
397 if (RT_FAILURE(rc))
398 {
 399        RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", _PATH_ETC);
400 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
401 }
402# else /* !RT_OS_OS2 */
403# ifndef DEBUG_vvl
404 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
405# else
406 char *home = getenv("HOME");
407 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", home);
408 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
409 if (RT_SUCCESS(rc))
410 {
411 Log(("NAT: DNS we're using %s\n", buff));
412 }
413 else
414 {
415 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
416 Log(("NAT: DNS we're using %s\n", buff));
417 }
418# endif
419# endif /* !RT_OS_OS2 */
420 if (RT_FAILURE(rc))
421 return -1;
422
423 if (ppszDomain)
424 *ppszDomain = NULL;
425
426 Log(("NAT: DNS Servers:\n"));
427 while ( RT_SUCCESS(rc = RTFileGets(f, buff, 512, &bytes))
428 && rc != VERR_EOF)
429 {
430 struct dns_entry *pDns = NULL;
 431        if (sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1)
432 {
433 if (!inet_aton(buff2, &tmp_addr))
434 continue;
435
436 /* localhost mask */
437 pDns = RTMemAllocZ(sizeof (struct dns_entry));
438 if (!pDns)
439 {
 440                LogRel(("NAT: can't allocate memory for DNS entry\n"));
441 return -1;
442 }
443
 444            /* if the host nameserver lives on the loopback net, remap it to the NAT alias */
445 pDns->de_addr.s_addr = tmp_addr.s_addr;
446 if ((pDns->de_addr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
447 {
448 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
449 }
450 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
451 fFoundNameserver++;
452 }
453 if ((!strncmp(buff, "domain", 6) || !strncmp(buff, "search", 6)))
454 {
455 char *tok;
456 char *saveptr;
457 struct dns_domain_entry *pDomain = NULL;
458 int fFoundDomain = 0;
459 tok = strtok_r(&buff[6], " \t\n", &saveptr);
460 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
461 {
462 if ( tok != NULL
463 && strcmp(tok, pDomain->dd_pszDomain) == 0)
464 {
465 fFoundDomain = 1;
466 break;
467 }
468 }
469 if (tok != NULL && !fFoundDomain)
470 {
471 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
472 if (!pDomain)
473 {
 474                    LogRel(("NAT: not enough memory to add domain to the list\n"));
475 return VERR_NO_MEMORY;
476 }
477 pDomain->dd_pszDomain = RTStrDup(tok);
478 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
479 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
480 }
481 }
482 }
483 RTFileClose(f);
484 if (!fFoundNameserver)
485 return -1;
486 return 0;
487}
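
/*
 * For reference, a resolv.conf along these (hypothetical) lines:
 *
 *     nameserver 192.168.1.1
 *     nameserver 127.0.0.1
 *     search example.local
 *
 * yields two dns_entry records (the loopback one remapped to the NAT alias as
 * described above) and one dns_domain_entry ("example.local"), via the two
 * parsers used in the loop:
 */
#if 0 /* parser excerpts repeated for illustration only */
    sscanf(buff, "nameserver%*[ \t]%255s", buff2);   /* extracts the server address */
    tok = strtok_r(&buff[6], " \t\n", &saveptr);     /* first suffix after "domain"/"search" */
#endif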
488
489#endif /* !RT_OS_WINDOWS */
490
491static int slirp_init_dns_list(PNATState pData)
492{
493 TAILQ_INIT(&pData->pDnsList);
494 LIST_INIT(&pData->pDomainList);
495 return get_dns_addr_domain(pData, true, NULL, NULL);
496}
497
498static void slirp_release_dns_list(PNATState pData)
499{
500 struct dns_entry *pDns = NULL;
501 struct dns_domain_entry *pDomain = NULL;
502
503 while (!TAILQ_EMPTY(&pData->pDnsList))
504 {
505 pDns = TAILQ_FIRST(&pData->pDnsList);
506 TAILQ_REMOVE(&pData->pDnsList, pDns, de_list);
507 RTMemFree(pDns);
508 }
509
510 while (!LIST_EMPTY(&pData->pDomainList))
511 {
512 pDomain = LIST_FIRST(&pData->pDomainList);
513 LIST_REMOVE(pDomain, dd_list);
514 if (pDomain->dd_pszDomain != NULL)
515 RTStrFree(pDomain->dd_pszDomain);
516 RTMemFree(pDomain);
517 }
518}
519
520int get_dns_addr(PNATState pData, struct in_addr *pdns_addr)
521{
522 return get_dns_addr_domain(pData, false, pdns_addr, NULL);
523}
524
525#ifndef VBOX_WITH_NAT_SERVICE
526int slirp_init(PNATState *ppData, const char *pszNetAddr, uint32_t u32Netmask,
527 bool fPassDomain, bool fUseHostResolver, void *pvUser)
528#else
529int slirp_init(PNATState *ppData, uint32_t u32NetAddr, uint32_t u32Netmask,
530 bool fPassDomain, bool fUseHostResolver, void *pvUser)
531#endif
532{
533 int fNATfailed = 0;
534 int rc;
535 PNATState pData = RTMemAllocZ(sizeof(NATState));
536 *ppData = pData;
537 if (!pData)
538 return VERR_NO_MEMORY;
539 if (u32Netmask & 0x1f)
540 /* CTL is x.x.x.15, bootp passes up to 16 IPs (15..31) */
541 return VERR_INVALID_PARAMETER;
542 pData->fPassDomain = !fUseHostResolver ? fPassDomain : false;
543 pData->use_host_resolver = fUseHostResolver;
544 pData->pvUser = pvUser;
545 pData->netmask = u32Netmask;
546
547 /* sockets & TCP defaults */
548 pData->socket_rcv = 64 * _1K;
549 pData->socket_snd = 64 * _1K;
550 tcp_sndspace = 64 * _1K;
551 tcp_rcvspace = 64 * _1K;
552
553#ifdef RT_OS_WINDOWS
554 {
555 WSADATA Data;
556 WSAStartup(MAKEWORD(2, 0), &Data);
557 }
558 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
559#endif
560#ifdef VBOX_WITH_SLIRP_MT
561 QSOCKET_LOCK_CREATE(tcb);
562 QSOCKET_LOCK_CREATE(udb);
563 rc = RTReqCreateQueue(&pData->pReqQueue);
564 AssertReleaseRC(rc);
565#endif
566
567 link_up = 1;
568
569 rc = bootp_dhcp_init(pData);
570 if (rc != 0)
571 {
 572        LogRel(("NAT: DHCP server initialization failed\n"));
573 return VINF_NAT_DNS;
574 }
575 debug_init();
576 if_init(pData);
577 ip_init(pData);
578 icmp_init(pData);
579
580 /* Initialise mbufs *after* setting the MTU */
581#ifndef VBOX_WITH_SLIRP_BSD_MBUF
582 m_init(pData);
583#else
584 mbuf_init(pData);
585#endif
586
587#ifndef VBOX_WITH_NAT_SERVICE
588 inet_aton(pszNetAddr, &pData->special_addr);
589#else
590 pData->special_addr.s_addr = u32NetAddr;
591#endif
592 pData->slirp_ethaddr = &special_ethaddr[0];
593 alias_addr.s_addr = pData->special_addr.s_addr | RT_H2N_U32_C(CTL_ALIAS);
 594    /* @todo add the ability to configure this stuff */
595
596 /* set default addresses */
597 inet_aton("127.0.0.1", &loopback_addr);
598 if (!pData->use_host_resolver)
599 {
600 if (slirp_init_dns_list(pData) < 0)
601 fNATfailed = 1;
602
603 dnsproxy_init(pData);
604 }
605
606 getouraddr(pData);
607 {
608 int flags = 0;
609 struct in_addr proxy_addr;
610 pData->proxy_alias = LibAliasInit(pData, NULL);
611 if (pData->proxy_alias == NULL)
612 {
613 LogRel(("NAT: LibAlias default rule wasn't initialized\n"));
614 AssertMsgFailed(("NAT: LibAlias default rule wasn't initialized\n"));
615 }
616 flags = LibAliasSetMode(pData->proxy_alias, 0, 0);
617#ifndef NO_FW_PUNCH
618 flags |= PKT_ALIAS_PUNCH_FW;
619#endif
620 flags |= PKT_ALIAS_LOG; /* set logging */
621 flags = LibAliasSetMode(pData->proxy_alias, flags, ~0);
622 proxy_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
623 LibAliasSetAddress(pData->proxy_alias, proxy_addr);
624 ftp_alias_load(pData);
625 nbt_alias_load(pData);
626 if (pData->use_host_resolver)
627 dns_alias_load(pData);
628 }
629 return fNATfailed ? VINF_NAT_DNS : VINF_SUCCESS;
630}
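
/*
 * A rough sketch of the call sequence a driver is expected to follow (error
 * handling trimmed; the 10.0.2.0/24 network and the pvUser/pDrvIns values are
 * assumptions of this sketch, and the non-NAT-service variant of slirp_init()
 * is shown).
 */
#if 0 /* illustrative sketch only */
    PNATState pNAT = NULL;
    int vrc = slirp_init(&pNAT, "10.0.2.0", 0xffffff00U /* 255.255.255.0, host order */,
                         true /* fPassDomain */, false /* fUseHostResolver */, pvUser);
    if (RT_SUCCESS(vrc))    /* note: VINF_NAT_DNS still counts as success, DNS discovery just failed */
    {
        slirp_register_statistics(pNAT, pDrvIns);
        slirp_link_up(pNAT);
        /* ... drive slirp_select_fill()/slirp_select_poll() from the NAT thread ... */
        slirp_link_down(pNAT);
        slirp_deregister_statistics(pNAT, pDrvIns);
        slirp_term(pNAT);
    }
#endif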
631
632/**
633 * Register statistics.
634 */
635void slirp_register_statistics(PNATState pData, PPDMDRVINS pDrvIns)
636{
637#ifdef VBOX_WITH_STATISTICS
638# define PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
639# define COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
640# include "counters.h"
641# undef COUNTER
642/** @todo register statistics for the variables dumped by:
643 * ipstats(pData); tcpstats(pData); udpstats(pData); icmpstats(pData);
644 * mbufstats(pData); sockstats(pData); */
645#endif /* VBOX_WITH_STATISTICS */
646}
647
648/**
649 * Deregister statistics.
650 */
651void slirp_deregister_statistics(PNATState pData, PPDMDRVINS pDrvIns)
652{
653 if (pData == NULL)
654 return;
655#ifdef VBOX_WITH_STATISTICS
656# define PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
657# define COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
658# include "counters.h"
659#endif /* VBOX_WITH_STATISTICS */
660}
661
662/**
663 * Marks the link as up, making it possible to establish new connections.
664 */
665void slirp_link_up(PNATState pData)
666{
667 struct arp_cache_entry *ac;
668 link_up = 1;
669
670 if (LIST_EMPTY(&pData->arp_cache))
671 return;
672
673 LIST_FOREACH(ac, &pData->arp_cache, list)
674 {
675 activate_port_forwarding(pData, ac->ether);
676 }
677}
678
679/**
680 * Marks the link as down and cleans up the current connections.
681 */
682void slirp_link_down(PNATState pData)
683{
684 struct socket *so;
685 struct port_forward_rule *rule;
686
687 while ((so = tcb.so_next) != &tcb)
688 {
689 if (so->so_state & SS_NOFDREF || so->s == -1)
690 sofree(pData, so);
691 else
692 tcp_drop(pData, sototcpcb(so), 0);
693 }
694
695 while ((so = udb.so_next) != &udb)
696 udp_detach(pData, so);
697
698 /*
699 * Clear the active state of port-forwarding rules to force
700 * re-setup on restoration of communications.
701 */
702 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
703 {
704 rule->activated = 0;
705 }
706 pData->cRedirectionsActive = 0;
707
708 link_up = 0;
709}
710
711/**
712 * Terminates the slirp component.
713 */
714void slirp_term(PNATState pData)
715{
716 if (pData == NULL)
717 return;
718#ifdef RT_OS_WINDOWS
719 pData->pfIcmpCloseHandle(pData->icmp_socket.sh);
720 FreeLibrary(pData->hmIcmpLibrary);
721 RTMemFree(pData->pvIcmpBuffer);
722#else
723 closesocket(pData->icmp_socket.s);
724#endif
725
726 slirp_link_down(pData);
727 slirp_release_dns_list(pData);
728 ftp_alias_unload(pData);
729 nbt_alias_unload(pData);
730 if (pData->use_host_resolver)
731 dns_alias_unload(pData);
732 while (!LIST_EMPTY(&instancehead))
733 {
734 struct libalias *la = LIST_FIRST(&instancehead);
 735        /* libalias does all the cleanup */
736 LibAliasUninit(la);
737 }
738 while (!LIST_EMPTY(&pData->arp_cache))
739 {
740 struct arp_cache_entry *ac = LIST_FIRST(&pData->arp_cache);
741 LIST_REMOVE(ac, list);
742 RTMemFree(ac);
743 }
744 bootp_dhcp_fini(pData);
745 m_fini(pData);
746#ifdef RT_OS_WINDOWS
747 WSACleanup();
748#endif
749#ifdef LOG_ENABLED
750 Log(("\n"
751 "NAT statistics\n"
752 "--------------\n"
753 "\n"));
754 ipstats(pData);
755 tcpstats(pData);
756 udpstats(pData);
757 icmpstats(pData);
758 mbufstats(pData);
759 sockstats(pData);
760 Log(("\n"
761 "\n"
762 "\n"));
763#endif
764 RTMemFree(pData);
765}
766
767
768#define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
769#define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
770
771/*
772 * curtime kept to an accuracy of 1ms
773 */
774static void updtime(PNATState pData)
775{
776#ifdef RT_OS_WINDOWS
777 struct _timeb tb;
778
779 _ftime(&tb);
780 curtime = (u_int)tb.time * (u_int)1000;
781 curtime += (u_int)tb.millitm;
782#else
783 gettimeofday(&tt, 0);
784
785 curtime = (u_int)tt.tv_sec * (u_int)1000;
786 curtime += (u_int)tt.tv_usec / (u_int)1000;
787
788 if ((tt.tv_usec % 1000) >= 500)
789 curtime++;
790#endif
791}
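
/*
 * Worked example of the rounding above: tv_sec = 10, tv_usec = 1500 gives
 * curtime = 10 * 1000 + 1500 / 1000 = 10001, and since 1500 % 1000 = 500 >= 500
 * the final increment makes it 10002, i.e. 1.5 ms is rounded up to 2 ms rather
 * than truncated.
 */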
792
793#ifdef RT_OS_WINDOWS
794void slirp_select_fill(PNATState pData, int *pnfds)
795#else /* RT_OS_WINDOWS */
796void slirp_select_fill(PNATState pData, int *pnfds, struct pollfd *polls)
797#endif /* !RT_OS_WINDOWS */
798{
799 struct socket *so, *so_next;
800 int nfds;
801#if defined(RT_OS_WINDOWS)
802 int rc;
803 int error;
804#else
805 int poll_index = 0;
806#endif
807 int i;
808
809 STAM_PROFILE_START(&pData->StatFill, a);
810
811 nfds = *pnfds;
812
813 /*
814 * First, TCP sockets
815 */
816 do_slowtimo = 0;
817 if (!link_up)
818 goto done;
819
820 /*
821 * *_slowtimo needs calling if there are IP fragments
822 * in the fragment queue, or there are TCP connections active
823 */
824 /* XXX:
 825     * triggering of fragment expiration should be the same, but should use the new macros
826 */
827 do_slowtimo = (tcb.so_next != &tcb);
828 if (!do_slowtimo)
829 {
830 for (i = 0; i < IPREASS_NHASH; i++)
831 {
832 if (!TAILQ_EMPTY(&ipq[i]))
833 {
834 do_slowtimo = 1;
835 break;
836 }
837 }
838 }
839 /* always add the ICMP socket */
840#ifndef RT_OS_WINDOWS
841 pData->icmp_socket.so_poll_index = -1;
842#endif
843 ICMP_ENGAGE_EVENT(&pData->icmp_socket, readfds);
844
845 STAM_COUNTER_RESET(&pData->StatTCP);
846 STAM_COUNTER_RESET(&pData->StatTCPHot);
847
848 QSOCKET_FOREACH(so, so_next, tcp)
849 /* { */
850#if !defined(RT_OS_WINDOWS)
851 so->so_poll_index = -1;
852#endif
853#ifndef VBOX_WITH_SLIRP_BSD_MBUF
854 if (pData->fmbuf_water_line == 1)
855 {
856 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
857 {
858 pData->fmbuf_water_warn_sent = 0;
859 pData->fmbuf_water_line = 0;
860 }
861# ifndef RT_OS_WINDOWS
862 poll_index = 0;
863# endif
864 goto done;
865 }
866#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
867 STAM_COUNTER_INC(&pData->StatTCP);
868
869 /*
870 * See if we need a tcp_fasttimo
871 */
872 if ( time_fasttimo == 0
873 && so->so_tcpcb != NULL
874 && so->so_tcpcb->t_flags & TF_DELACK)
875 {
876 time_fasttimo = curtime; /* Flag when we want a fasttimo */
877 }
878
879 /*
 880         * NOFDREF can include sockets still connecting to the local host,
 881         * newly socreate()'d sockets etc.  We don't want to select these.
882 */
883 if (so->so_state & SS_NOFDREF || so->s == -1)
884 CONTINUE(tcp);
885
886 /*
887 * Set for reading sockets which are accepting
888 */
889 if (so->so_state & SS_FACCEPTCONN)
890 {
891 STAM_COUNTER_INC(&pData->StatTCPHot);
892 TCP_ENGAGE_EVENT1(so, readfds);
893 CONTINUE(tcp);
894 }
895
896 /*
897 * Set for writing sockets which are connecting
898 */
899 if (so->so_state & SS_ISFCONNECTING)
900 {
901 Log2(("connecting %R[natsock] engaged\n",so));
902 STAM_COUNTER_INC(&pData->StatTCPHot);
903 TCP_ENGAGE_EVENT1(so, writefds);
904 }
905
906 /*
907 * Set for writing if we are connected, can send more, and
908 * we have something to send
909 */
910 if (CONN_CANFSEND(so) && so->so_rcv.sb_cc)
911 {
912 STAM_COUNTER_INC(&pData->StatTCPHot);
913 TCP_ENGAGE_EVENT1(so, writefds);
914 }
915
916 /*
917 * Set for reading (and urgent data) if we are connected, can
918 * receive more, and we have room for it XXX /2 ?
919 */
920 if (CONN_CANFRCV(so) && (so->so_snd.sb_cc < (so->so_snd.sb_datalen/2)))
921 {
922 STAM_COUNTER_INC(&pData->StatTCPHot);
923 TCP_ENGAGE_EVENT2(so, readfds, xfds);
924 }
925 LOOP_LABEL(tcp, so, so_next);
926 }
927
928 /*
929 * UDP sockets
930 */
931 STAM_COUNTER_RESET(&pData->StatUDP);
932 STAM_COUNTER_RESET(&pData->StatUDPHot);
933
934 QSOCKET_FOREACH(so, so_next, udp)
935 /* { */
936
937#ifndef VBOX_WITH_SLIRP_BSD_MBUF
938 if (pData->fmbuf_water_line == 1)
939 {
940 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
941 {
942 pData->fmbuf_water_line = 0;
943 pData->fmbuf_water_warn_sent = 0;
944 }
945# ifndef RT_OS_WINDOWS
946 poll_index = 0;
947# endif
948 goto done;
949 }
950#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
951 STAM_COUNTER_INC(&pData->StatUDP);
952#if !defined(RT_OS_WINDOWS)
953 so->so_poll_index = -1;
954#endif
955
956 /*
957 * See if it's timed out
958 */
959 if (so->so_expire)
960 {
961 if (so->so_expire <= curtime)
962 {
963 Log2(("NAT: %R[natsock] expired\n", so));
964 if (so->so_timeout != NULL)
965 {
966 so->so_timeout(pData, so, so->so_timeout_arg);
967 }
968#ifdef VBOX_WITH_SLIRP_MT
 969                    /* we need so_next to continue our loop */
970 so_next = so->so_next;
971#endif
972 UDP_DETACH(pData, so, so_next);
973 CONTINUE_NO_UNLOCK(udp);
974 }
975 else
976 {
977 do_slowtimo = 1; /* Let socket expire */
978 }
979 }
980
981 /*
982 * When UDP packets are received from over the link, they're
983 * sendto()'d straight away, so no need for setting for writing
984 * Limit the number of packets queued by this session to 4.
985 * Note that even though we try and limit this to 4 packets,
986 * the session could have more queued if the packets needed
987 * to be fragmented.
988 *
989 * (XXX <= 4 ?)
990 */
991 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4)
992 {
993 STAM_COUNTER_INC(&pData->StatUDPHot);
994 UDP_ENGAGE_EVENT(so, readfds);
995 }
996 LOOP_LABEL(udp, so, so_next);
997 }
998done:
999
1000#if defined(RT_OS_WINDOWS)
1001 *pnfds = VBOX_EVENT_COUNT;
1002#else /* RT_OS_WINDOWS */
1003 AssertRelease(poll_index <= *pnfds);
1004 *pnfds = poll_index;
1005#endif /* !RT_OS_WINDOWS */
1006
1007 STAM_PROFILE_STOP(&pData->StatFill, a);
1008}
1009
1010#if defined(RT_OS_WINDOWS)
1011void slirp_select_poll(PNATState pData, int fTimeout, int fIcmp)
1012#else /* RT_OS_WINDOWS */
1013void slirp_select_poll(PNATState pData, struct pollfd *polls, int ndfs)
1014#endif /* !RT_OS_WINDOWS */
1015{
1016 struct socket *so, *so_next;
1017 int ret;
1018#if defined(RT_OS_WINDOWS)
1019 WSANETWORKEVENTS NetworkEvents;
1020 int rc;
1021 int error;
1022#else
1023 int poll_index = 0;
1024#endif
1025
1026 STAM_PROFILE_START(&pData->StatPoll, a);
1027
1028 /* Update time */
1029 updtime(pData);
1030
1031 /*
1032 * See if anything has timed out
1033 */
1034 if (link_up)
1035 {
1036 if (time_fasttimo && ((curtime - time_fasttimo) >= 2))
1037 {
1038 STAM_PROFILE_START(&pData->StatFastTimer, b);
1039 tcp_fasttimo(pData);
1040 time_fasttimo = 0;
1041 STAM_PROFILE_STOP(&pData->StatFastTimer, b);
1042 }
1043 if (do_slowtimo && ((curtime - last_slowtimo) >= 499))
1044 {
1045 STAM_PROFILE_START(&pData->StatSlowTimer, c);
1046 ip_slowtimo(pData);
1047 tcp_slowtimo(pData);
1048 last_slowtimo = curtime;
1049 STAM_PROFILE_STOP(&pData->StatSlowTimer, c);
1050 }
1051 }
1052#if defined(RT_OS_WINDOWS)
1053 if (fTimeout)
1054 return; /* only timer update */
1055#endif
1056
1057 /*
1058 * Check sockets
1059 */
1060 if (!link_up)
1061 goto done;
1062#if defined(RT_OS_WINDOWS)
 1063    /* XXX: before renaming, please see the definition of
 1064     * fIcmp in slirp_state.h
1065 */
1066 if (fIcmp)
1067 sorecvfrom(pData, &pData->icmp_socket);
1068#else
1069 if ( (pData->icmp_socket.s != -1)
1070 && CHECK_FD_SET(&pData->icmp_socket, ignored, readfds))
1071 sorecvfrom(pData, &pData->icmp_socket);
1072#endif
1073 /*
1074 * Check TCP sockets
1075 */
1076 QSOCKET_FOREACH(so, so_next, tcp)
1077 /* { */
1078#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1079 if (pData->fmbuf_water_line == 1)
1080 {
1081 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
1082 {
1083 pData->fmbuf_water_line = 0;
1084 pData->fmbuf_water_warn_sent = 0;
1085 }
1086 goto done;
1087 }
1088#endif
1089
1090#ifdef VBOX_WITH_SLIRP_MT
1091 if ( so->so_state & SS_NOFDREF
1092 && so->so_deleted == 1)
1093 {
1094 struct socket *son, *sop = NULL;
1095 QSOCKET_LOCK(tcb);
1096 if (so->so_next != NULL)
1097 {
1098 if (so->so_next != &tcb)
1099 SOCKET_LOCK(so->so_next);
1100 son = so->so_next;
1101 }
1102 if ( so->so_prev != &tcb
1103 && so->so_prev != NULL)
1104 {
1105 SOCKET_LOCK(so->so_prev);
1106 sop = so->so_prev;
1107 }
1108 QSOCKET_UNLOCK(tcb);
1109 remque(pData, so);
1110 NSOCK_DEC();
1111 SOCKET_UNLOCK(so);
1112 SOCKET_LOCK_DESTROY(so);
1113 RTMemFree(so);
1114 so_next = son;
1115 if (sop != NULL)
1116 SOCKET_UNLOCK(sop);
1117 CONTINUE_NO_UNLOCK(tcp);
1118 }
1119#endif
1120 /*
1121 * FD_ISSET is meaningless on these sockets
1122 * (and they can crash the program)
1123 */
1124 if (so->so_state & SS_NOFDREF || so->s == -1)
1125 CONTINUE(tcp);
1126
1127 POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);
1128
1129 LOG_NAT_SOCK(so, TCP, &NetworkEvents, readfds, writefds, xfds);
1130
1131
1132 /*
1133 * Check for URG data
1134 * This will soread as well, so no need to
1135 * test for readfds below if this succeeds
1136 */
1137
1138 /* out-of-band data */
1139 if ( CHECK_FD_SET(so, NetworkEvents, xfds)
1140#ifdef RT_OS_DARWIN
 1141             /* Darwin (and probably other BSD hosts) generates a POLLPRI|POLLHUP event on receiving a TCP segment
 1142              * with the ACK|URG|FIN flags set; on other Unix hosts this combination doesn't enter this branch
1143 */
1144 && !CHECK_FD_SET(so, NetworkEvents, closefds)
1145#endif
1146 )
1147 {
1148 sorecvoob(pData, so);
1149 }
1150
1151 /*
1152 * Check sockets for reading
1153 */
1154 else if ( CHECK_FD_SET(so, NetworkEvents, readfds)
1155 || WIN_CHECK_FD_SET(so, NetworkEvents, acceptds))
1156 {
1157 /*
1158 * Check for incoming connections
1159 */
1160 if (so->so_state & SS_FACCEPTCONN)
1161 {
1162 TCP_CONNECT(pData, so);
1163 if (!CHECK_FD_SET(so, NetworkEvents, closefds))
1164 CONTINUE(tcp);
1165 }
1166
1167 ret = soread(pData, so);
1168 /* Output it if we read something */
1169 if (RT_LIKELY(ret > 0))
1170 TCP_OUTPUT(pData, sototcpcb(so));
1171 }
1172
1173 /*
1174 * Check for FD_CLOSE events.
 1175         * In some cases, once FD_CLOSE has been signalled on a socket, it can get cleared again later (for various reasons).
1176 */
1177 if ( CHECK_FD_SET(so, NetworkEvents, closefds)
1178 || (so->so_close == 1))
1179 {
1180 /*
1181 * drain the socket
1182 */
1183 for (;;)
1184 {
1185 ret = soread(pData, so);
1186 if (ret > 0)
1187 TCP_OUTPUT(pData, sototcpcb(so));
1188 else
1189 {
1190 Log2(("%R[natsock] errno %d:%s\n", so, errno, strerror(errno)));
1191 break;
1192 }
1193 }
1194 /* mark the socket for termination _after_ it was drained */
1195 so->so_close = 1;
1196 CONTINUE(tcp);
1197 }
1198
1199 /*
1200 * Check sockets for writing
1201 */
1202 if (CHECK_FD_SET(so, NetworkEvents, writefds))
1203 {
1204 /*
1205 * Check for non-blocking, still-connecting sockets
1206 */
1207 if (so->so_state & SS_ISFCONNECTING)
1208 {
 1209                Log2(("connecting %R[natsock] caught\n", so));
1210 /* Connected */
1211 so->so_state &= ~SS_ISFCONNECTING;
1212
1213 /*
 1214                 * This should probably be guarded by PROBE_CONN too. Anyway,
1215 * we disable it on OS/2 because the below send call returns
1216 * EFAULT which causes the opened TCP socket to close right
1217 * after it has been opened and connected.
1218 */
1219#ifndef RT_OS_OS2
1220 ret = send(so->s, (const char *)&ret, 0, 0);
1221 if (ret < 0)
1222 {
1223 /* XXXXX Must fix, zero bytes is a NOP */
1224 if ( errno == EAGAIN
1225 || errno == EWOULDBLOCK
1226 || errno == EINPROGRESS
1227 || errno == ENOTCONN)
1228 CONTINUE(tcp);
1229
1230 /* else failed */
1231 so->so_state = SS_NOFDREF;
1232 }
1233 /* else so->so_state &= ~SS_ISFCONNECTING; */
1234#endif
1235
1236 /*
1237 * Continue tcp_input
1238 */
1239 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1240 /* continue; */
1241 }
1242 else
1243 SOWRITE(ret, pData, so);
1244 /*
1245 * XXX If we wrote something (a lot), there could be the need
1246 * for a window update. In the worst case, the remote will send
1247 * a window probe to get things going again.
1248 */
1249 }
1250
1251 /*
1252 * Probe a still-connecting, non-blocking socket
1253 * to check if it's still alive
1254 */
1255#ifdef PROBE_CONN
1256 if (so->so_state & SS_ISFCONNECTING)
1257 {
1258 ret = recv(so->s, (char *)&ret, 0, 0);
1259
1260 if (ret < 0)
1261 {
1262 /* XXX */
1263 if ( errno == EAGAIN
1264 || errno == EWOULDBLOCK
1265 || errno == EINPROGRESS
1266 || errno == ENOTCONN)
1267 {
1268 CONTINUE(tcp); /* Still connecting, continue */
1269 }
1270
1271 /* else failed */
1272 so->so_state = SS_NOFDREF;
1273
1274 /* tcp_input will take care of it */
1275 }
1276 else
1277 {
1278 ret = send(so->s, &ret, 0, 0);
1279 if (ret < 0)
1280 {
1281 /* XXX */
1282 if ( errno == EAGAIN
1283 || errno == EWOULDBLOCK
1284 || errno == EINPROGRESS
1285 || errno == ENOTCONN)
1286 {
1287 CONTINUE(tcp);
1288 }
1289 /* else failed */
1290 so->so_state = SS_NOFDREF;
1291 }
1292 else
1293 so->so_state &= ~SS_ISFCONNECTING;
1294
1295 }
 1296        TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1297 } /* SS_ISFCONNECTING */
1298#endif
1299 LOOP_LABEL(tcp, so, so_next);
1300 }
1301
1302 /*
1303 * Now UDP sockets.
1304 * Incoming packets are sent straight away, they're not buffered.
1305 * Incoming UDP data isn't buffered either.
1306 */
1307 QSOCKET_FOREACH(so, so_next, udp)
1308 /* { */
1309#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1310 if (pData->fmbuf_water_line == 1)
1311 {
1312 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
1313 {
1314 pData->fmbuf_water_line = 0;
1315 pData->fmbuf_water_warn_sent = 0;
1316 }
1317 goto done;
1318 }
1319#endif
1320#ifdef VBOX_WITH_SLIRP_MT
1321 if ( so->so_state & SS_NOFDREF
1322 && so->so_deleted == 1)
1323 {
1324 struct socket *son, *sop = NULL;
1325 QSOCKET_LOCK(udb);
1326 if (so->so_next != NULL)
1327 {
1328 if (so->so_next != &udb)
1329 SOCKET_LOCK(so->so_next);
1330 son = so->so_next;
1331 }
1332 if ( so->so_prev != &udb
1333 && so->so_prev != NULL)
1334 {
1335 SOCKET_LOCK(so->so_prev);
1336 sop = so->so_prev;
1337 }
1338 QSOCKET_UNLOCK(udb);
1339 remque(pData, so);
1340 NSOCK_DEC();
1341 SOCKET_UNLOCK(so);
1342 SOCKET_LOCK_DESTROY(so);
1343 RTMemFree(so);
1344 so_next = son;
1345 if (sop != NULL)
1346 SOCKET_UNLOCK(sop);
1347 CONTINUE_NO_UNLOCK(udp);
1348 }
1349#endif
1350 POLL_UDP_EVENTS(rc, error, so, &NetworkEvents);
1351
1352 LOG_NAT_SOCK(so, UDP, &NetworkEvents, readfds, writefds, xfds);
1353
1354 if (so->s != -1 && CHECK_FD_SET(so, NetworkEvents, readfds))
1355 {
1356 SORECVFROM(pData, so);
1357 }
1358 LOOP_LABEL(udp, so, so_next);
1359 }
1360
1361done:
1362#if 0
1363 /*
1364 * See if we can start outputting
1365 */
1366 if (if_queued && link_up)
1367 if_start(pData);
1368#endif
1369
1370 STAM_PROFILE_STOP(&pData->StatPoll, a);
1371}
1372
1373
1374struct arphdr
1375{
1376 unsigned short ar_hrd; /* format of hardware address */
1377 unsigned short ar_pro; /* format of protocol address */
1378 unsigned char ar_hln; /* length of hardware address */
1379 unsigned char ar_pln; /* length of protocol address */
1380 unsigned short ar_op; /* ARP opcode (command) */
1381
1382 /*
 1383     * Ethernet looks like this.  This part is variable-sized, however...
1384 */
1385 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
1386 unsigned char ar_sip[4]; /* sender IP address */
1387 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
1388 unsigned char ar_tip[4]; /* target IP address */
1389};
1390AssertCompileSize(struct arphdr, 28);
1391
1392static void arp_input(PNATState pData, struct mbuf *m)
1393{
1394 struct ethhdr *eh;
1395 struct ethhdr *reh;
1396 struct arphdr *ah;
1397 struct arphdr *rah;
1398 int ar_op;
1399 struct ex_list *ex_ptr;
1400 uint32_t htip;
1401 uint32_t tip;
1402 struct mbuf *mr;
1403 eh = mtod(m, struct ethhdr *);
1404 ah = (struct arphdr *)&eh[1];
1405 htip = RT_N2H_U32(*(uint32_t*)ah->ar_tip);
1406 tip = *(uint32_t*)ah->ar_tip;
1407
1408 ar_op = RT_N2H_U16(ah->ar_op);
1409
1410 switch (ar_op)
1411 {
1412 case ARPOP_REQUEST:
1413#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1414 mr = m_get(pData);
1415
1416 reh = mtod(mr, struct ethhdr *);
1417 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1418 Log4(("NAT: arp:%R[ether]->%R[ether]\n",
1419 reh->h_source, reh->h_dest));
1420 Log4(("NAT: arp: %R[IP4]\n", &tip));
1421
1422 mr->m_data += if_maxlinkhdr;
1423 mr->m_len = sizeof(struct arphdr);
1424 rah = mtod(mr, struct arphdr *);
1425#else
1426 mr = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1427 reh = mtod(mr, struct ethhdr *);
1428 mr->m_data += ETH_HLEN;
1429 rah = mtod(mr, struct arphdr *);
1430 mr->m_len = sizeof(struct arphdr);
1431 Assert(mr);
1432 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1433#endif
1434#ifdef VBOX_WITH_NAT_SERVICE
1435 if (tip == pData->special_addr.s_addr)
1436 goto arp_ok;
1437#endif
1438 if ((htip & pData->netmask) == RT_N2H_U32(pData->special_addr.s_addr))
1439 {
1440 if ( CTL_CHECK(htip, CTL_DNS)
1441 || CTL_CHECK(htip, CTL_ALIAS)
1442 || CTL_CHECK(htip, CTL_TFTP))
1443 goto arp_ok;
1444 for (ex_ptr = exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next)
1445 {
1446 if ((htip & ~pData->netmask) == ex_ptr->ex_addr)
1447 {
1448 goto arp_ok;
1449 }
1450 }
1451 m_free(pData, m);
1452 m_free(pData, mr);
1453 return;
1454
1455 arp_ok:
1456 rah->ar_hrd = RT_H2N_U16_C(1);
1457 rah->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1458 rah->ar_hln = ETH_ALEN;
1459 rah->ar_pln = 4;
1460 rah->ar_op = RT_H2N_U16_C(ARPOP_REPLY);
1461 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN);
1462
1463 switch (htip & ~pData->netmask)
1464 {
1465 case CTL_DNS:
1466 case CTL_ALIAS:
1467 rah->ar_sha[5] = (uint8_t)(htip & ~pData->netmask);
1468 break;
1469 default:;
1470 }
1471
1472 memcpy(rah->ar_sip, ah->ar_tip, 4);
1473 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
1474 memcpy(rah->ar_tip, ah->ar_sip, 4);
1475 if_encap(pData, ETH_P_ARP, mr, ETH_ENCAP_URG);
1476 m_free(pData, m);
1477 }
1478 /* Gratuitous ARP */
1479 if ( *(uint32_t *)ah->ar_sip == *(uint32_t *)ah->ar_tip
1480 && memcmp(ah->ar_tha, broadcast_ethaddr, ETH_ALEN) == 0
1481 && memcmp(eh->h_dest, broadcast_ethaddr, ETH_ALEN) == 0)
1482 {
 1483            /* We've received an announcement of an address assignment;
 1484             * update the ARP cache accordingly.
1485 */
1486 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]) == 0)
1487 {
1488 m_free(pData, mr);
1489 m_free(pData, m);
1490 break;
1491 }
1492 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]);
1493 }
1494 break;
1495
1496 case ARPOP_REPLY:
1497 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]) == 0)
1498 {
1499 m_free(pData, m);
1500 break;
1501 }
1502 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_sip, ah->ar_sha);
1503 m_free(pData, m);
1504 break;
1505
1506 default:
1507 break;
1508 }
1509}
1510
1511/**
1512 * Feed a packet into the slirp engine.
1513 *
1514 * @param m Data buffer, m_len is not valid.
1515 * @param cbBuf The length of the data in m.
1516 */
1517void slirp_input(PNATState pData, struct mbuf *m, size_t cbBuf)
1518{
1519 int proto;
1520 static bool fWarnedIpv6;
1521 struct ethhdr *eh;
1522 uint8_t au8Ether[ETH_ALEN];
1523
1524 m->m_len = cbBuf;
1525 if (cbBuf < ETH_HLEN)
1526 {
 1527        LogRel(("NAT: packet of size %d has been ignored\n", m->m_len));
1528 m_free(pData, m);
1529 return;
1530 }
1531 eh = mtod(m, struct ethhdr *);
1532 proto = RT_N2H_U16(eh->h_proto);
1533
1534 memcpy(au8Ether, eh->h_source, ETH_ALEN);
1535
1536 switch(proto)
1537 {
1538 case ETH_P_ARP:
1539 arp_input(pData, m);
1540 break;
1541
1542 case ETH_P_IP:
1543 /* Update time. Important if the network is very quiet, as otherwise
1544 * the first outgoing connection gets an incorrect timestamp. */
1545 updtime(pData);
1546 m_adj(m, ETH_HLEN);
1547#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1548 M_ASSERTPKTHDR(m);
1549 m->m_pkthdr.header = mtod(m, void *);
1550#else /* !VBOX_WITH_SLIRP_BSD_MBUF */
1551 if ( pData->fmbuf_water_line
1552 && pData->fmbuf_water_warn_sent == 0
1553 && (curtime - pData->tsmbuf_water_warn_sent) > 500)
1554 {
1555 icmp_error(pData, m, ICMP_SOURCEQUENCH, 0, 0, "Out of resources!!!");
1556 pData->fmbuf_water_warn_sent = 1;
1557 pData->tsmbuf_water_warn_sent = curtime;
1558 }
1559#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
1560 ip_input(pData, m);
1561 break;
1562
1563 case ETH_P_IPV6:
1564 m_free(pData, m);
1565 if (!fWarnedIpv6)
1566 {
1567 LogRel(("NAT: IPv6 not supported\n"));
1568 fWarnedIpv6 = true;
1569 }
1570 break;
1571
1572 default:
1573 Log(("NAT: Unsupported protocol %x\n", proto));
1574 m_free(pData, m);
1575 break;
1576 }
1577
1578 if (pData->cRedirectionsActive != pData->cRedirectionsStored)
1579 activate_port_forwarding(pData, au8Ether);
1580}
1581
1582/* output the IP packet to the ethernet device */
1583void if_encap(PNATState pData, uint16_t eth_proto, struct mbuf *m, int flags)
1584{
1585 struct ethhdr *eh;
1586 uint8_t *buf = NULL;
1587 size_t mlen = 0;
1588 STAM_PROFILE_START(&pData->StatIF_encap, a);
1589
1590#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1591 m->m_data -= if_maxlinkhdr;
1592 m->m_len += ETH_HLEN;
1593 eh = mtod(m, struct ethhdr *);
1594
1595 if (MBUF_HEAD(m) != m->m_data)
1596 {
 1597        LogRel(("NAT: detected corruption of the Ethernet frame\n"));
1598 AssertMsgFailed(("!!Ethernet frame corrupted!!"));
1599 }
1600#else
1601 M_ASSERTPKTHDR(m);
1602 m->m_data -= ETH_HLEN;
1603 m->m_len += ETH_HLEN;
1604 eh = mtod(m, struct ethhdr *);
1605#endif
1606
1607 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) != 0)
1608 {
1609 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
1610 memcpy(eh->h_source, special_ethaddr, ETH_ALEN);
1611 Assert(memcmp(eh->h_dest, special_ethaddr, ETH_ALEN) != 0);
1612 if (memcmp(eh->h_dest, zerro_ethaddr, ETH_ALEN) == 0)
1613 {
1614 /* don't do anything */
1615 m_free(pData, m);
1616 goto done;
1617 }
1618 }
1619#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1620 mlen = m->m_len;
1621#else
1622 mlen = m_length(m, NULL);
1623 buf = RTMemAlloc(mlen);
1624 if (buf == NULL)
1625 {
1626 LogRel(("NAT: Can't alloc memory for outgoing buffer\n"));
1627 m_free(pData, m);
1628 goto done;
1629 }
1630#endif
1631 eh->h_proto = RT_H2N_U16(eth_proto);
1632#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1633 m_copydata(m, 0, mlen, (char *)buf);
1634 if (flags & ETH_ENCAP_URG)
1635 slirp_urg_output(pData->pvUser, m, buf, mlen);
1636 else
1637 slirp_output(pData->pvUser, m, buf, mlen);
1638#else
1639 if (flags & ETH_ENCAP_URG)
1640 slirp_urg_output(pData->pvUser, m, mtod(m, const uint8_t *), mlen);
1641 else
1642 slirp_output(pData->pvUser, m, mtod(m, const uint8_t *), mlen);
1643#endif
1644done:
1645 STAM_PROFILE_STOP(&pData->StatIF_encap, a);
1646}
1647
1648/**
 1649 * We still use the DHCP server leases to map an Ethernet address to an IP address.
1650 * @todo see rt_lookup_in_cache
1651 */
1652static uint32_t find_guest_ip(PNATState pData, const uint8_t *eth_addr)
1653{
1654 uint32_t ip = INADDR_ANY;
1655 int rc;
1656
1657 if (eth_addr == NULL)
1658 return INADDR_ANY;
1659
1660 if ( memcmp(eth_addr, zerro_ethaddr, ETH_ALEN) == 0
1661 || memcmp(eth_addr, broadcast_ethaddr, ETH_ALEN) == 0)
1662 return INADDR_ANY;
1663
1664 rc = slirp_arp_lookup_ip_by_ether(pData, eth_addr, &ip);
1665 if (RT_SUCCESS(rc))
1666 return ip;
1667
1668 bootp_cache_lookup_ip_by_ether(pData, eth_addr, &ip);
1669 /* ignore return code, ip will be set to INADDR_ANY on error */
1670 return ip;
1671}
1672
1673/**
 1674 * We need to check whether we've activated port forwarding
 1675 * for a specific machine; that of course relates to
 1676 * service mode.
1677 * @todo finish this for service case
1678 */
1679static void activate_port_forwarding(PNATState pData, const uint8_t *h_source)
1680{
1681 struct port_forward_rule *rule;
1682
1683 /* check mac here */
1684 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
1685 {
1686 struct socket *so;
1687 struct alias_link *alias_link;
1688 struct libalias *lib;
1689 int flags;
1690 struct sockaddr sa;
1691 struct sockaddr_in *psin;
1692 socklen_t socketlen;
1693 struct in_addr alias;
1694 int rc;
1695 uint32_t guest_addr; /* need to understand if we already give address to guest */
1696
1697 if (rule->activated)
1698 continue;
1699
1700#ifdef VBOX_WITH_NAT_SERVICE
1701 if (memcmp(rule->mac_address, h_source, ETH_ALEN) != 0)
 1702            continue; /* not the right MAC; @todo it'd be better to keep the port-forwarding list per MAC */
1703 guest_addr = find_guest_ip(pData, h_source);
1704#else
1705#if 0
1706 if (memcmp(client_ethaddr, h_source, ETH_ALEN) != 0)
1707 continue;
1708#endif
1709 guest_addr = find_guest_ip(pData, h_source);
1710#endif
1711 if (guest_addr == INADDR_ANY)
1712 {
1713 /* the address wasn't granted */
1714 return;
1715 }
1716
1717#if !defined(VBOX_WITH_NAT_SERVICE)
1718 if (rule->guest_addr.s_addr != guest_addr)
1719 continue;
1720#endif
1721
1722 LogRel(("NAT: set redirect %s host port %d => guest port %d @ %R[IP4]\n",
1723 (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1724 rule->host_port, rule->guest_port, &guest_addr));
1725
1726 if (rule->proto == IPPROTO_UDP)
1727 so = udp_listen(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1728 RT_H2N_U16(rule->guest_port), 0);
1729 else
1730 so = solisten(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1731 RT_H2N_U16(rule->guest_port), 0);
1732
1733 if (so == NULL)
1734 goto remove_port_forwarding;
1735
1736 psin = (struct sockaddr_in *)&sa;
1737 psin->sin_family = AF_INET;
1738 psin->sin_port = 0;
1739 psin->sin_addr.s_addr = INADDR_ANY;
1740 socketlen = sizeof(struct sockaddr);
1741
1742 rc = getsockname(so->s, &sa, &socketlen);
1743 if (rc < 0 || sa.sa_family != AF_INET)
1744 goto remove_port_forwarding;
1745
1746 psin = (struct sockaddr_in *)&sa;
1747
1748 lib = LibAliasInit(pData, NULL);
1749 flags = LibAliasSetMode(lib, 0, 0);
1750 flags |= PKT_ALIAS_LOG; /* set logging */
 1751        flags |= PKT_ALIAS_REVERSE; /* reverse the direction of translation */
1752 flags = LibAliasSetMode(lib, flags, ~0);
1753
1754 alias.s_addr = RT_H2N_U32(RT_N2H_U32(guest_addr) | CTL_ALIAS);
1755 alias_link = LibAliasRedirectPort(lib, psin->sin_addr, RT_H2N_U16(rule->host_port),
1756 alias, RT_H2N_U16(rule->guest_port),
1757 pData->special_addr, -1, /* not very clear for now */
1758 rule->proto);
1759 if (!alias_link)
1760 goto remove_port_forwarding;
1761
1762 so->so_la = lib;
1763 rule->activated = 1;
1764 pData->cRedirectionsActive++;
1765 continue;
1766
1767 remove_port_forwarding:
1768 LogRel(("NAT: failed to redirect %s %d => %d\n",
1769 (rule->proto == IPPROTO_UDP?"UDP":"TCP"), rule->host_port, rule->guest_port));
1770 LIST_REMOVE(rule, list);
1771 pData->cRedirectionsStored--;
1772 RTMemFree(rule);
1773 }
1774}
1775
1776/**
 1777 * Changed in 3.1: instead of opening a new socket right away, we first
 1778 * gather more information:
 1779 * 1. bind IP
 1780 * 2. host port
 1781 * 3. guest port
 1782 * 4. protocol
 1783 * 5. guest MAC address
 1784 * The guest's MAC address is rather important for the service case, but we can
 1785 * easily get it from the VM configuration in DrvNAT or the service; the idea is
 1786 * to activate the corresponding port forwarding later.
1787 */
1788int slirp_redir(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1789 struct in_addr guest_addr, int guest_port, const uint8_t *ethaddr)
1790{
1791 struct port_forward_rule *rule = NULL;
1792 Assert(memcmp(ethaddr, zerro_ethaddr, ETH_ALEN) == 0);
1793
1794 rule = RTMemAllocZ(sizeof(struct port_forward_rule));
1795 if (rule == NULL)
1796 return 1;
1797
1798 rule->proto = (is_udp ? IPPROTO_UDP : IPPROTO_TCP);
1799 rule->host_port = host_port;
1800 rule->guest_port = guest_port;
1801#ifndef VBOX_WITH_NAT_SERVICE
1802 rule->guest_addr.s_addr = guest_addr.s_addr;
1803#endif
1804 rule->bind_ip.s_addr = host_addr.s_addr;
1805 memcpy(rule->mac_address, ethaddr, ETH_ALEN);
1806 /* @todo add mac address */
1807 LIST_INSERT_HEAD(&pData->port_forward_rule_head, rule, list);
1808 pData->cRedirectionsStored++;
1809 return 0;
1810}
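
/*
 * A hedged example of registering a rule: forward host TCP port 2222 (bound on
 * all host interfaces) to guest port 22.  The guest address is hypothetical and
 * is ignored in NAT-service builds; the all-zero MAC matches the assertion in
 * slirp_redir().  The rule only goes live once activate_port_forwarding() runs
 * for that guest.
 */
#if 0 /* illustrative sketch only */
    struct in_addr hostAddr, guestAddr;
    hostAddr.s_addr = INADDR_ANY;              /* bind on all host interfaces */
    inet_aton("10.0.2.15", &guestAddr);        /* hypothetical guest IP */
    if (slirp_redir(pData, 0 /* TCP */, hostAddr, 2222, guestAddr, 22, zerro_ethaddr) != 0)
        LogRel(("NAT: failed to register the ssh forwarding rule\n"));
#endif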
1811
1812int slirp_add_exec(PNATState pData, int do_pty, const char *args, int addr_low_byte,
1813 int guest_port)
1814{
1815 return add_exec(&exec_list, do_pty, (char *)args,
1816 addr_low_byte, RT_H2N_U16(guest_port));
1817}
1818
1819void slirp_set_ethaddr_and_activate_port_forwarding(PNATState pData, const uint8_t *ethaddr, uint32_t GuestIP)
1820{
1821#ifndef VBOX_WITH_NAT_SERVICE
1822 memcpy(client_ethaddr, ethaddr, ETH_ALEN);
1823#endif
1824 if (GuestIP != INADDR_ANY)
1825 {
1826 slirp_arp_cache_update_or_add(pData, GuestIP, ethaddr);
1827 activate_port_forwarding(pData, ethaddr);
1828 }
1829}
1830
1831#if defined(RT_OS_WINDOWS)
1832HANDLE *slirp_get_events(PNATState pData)
1833{
1834 return pData->phEvents;
1835}
1836void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index)
1837{
1838 pData->phEvents[index] = hEvent;
1839}
1840#endif
1841
1842unsigned int slirp_get_timeout_ms(PNATState pData)
1843{
1844 if (link_up)
1845 {
1846 if (time_fasttimo)
1847 return 2;
1848 if (do_slowtimo)
1849 return 500; /* see PR_SLOWHZ */
1850 }
1851 return 3600*1000; /* one hour */
1852}
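
/*
 * A condensed sketch of how a non-Windows host loop typically drives the
 * select/poll functions above; the array size, the missing wakeup mechanism
 * and the loop structure are assumptions of this sketch.
 */
#if 0 /* illustrative sketch only */
# ifndef RT_OS_WINDOWS
    struct pollfd aPolls[256];                 /* must be large enough for all NAT sockets */
    for (;;)
    {
        int cPolls = RT_ELEMENTS(aPolls);      /* capacity in, used count out */
        slirp_select_fill(pData, &cPolls, &aPolls[0]);
        int cReady = poll(&aPolls[0], cPolls, slirp_get_timeout_ms(pData));
        if (cReady >= 0)
            slirp_select_poll(pData, &aPolls[0], cPolls);   /* dispatch I/O and run the timers */
    }
# endif
#endif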
1853
1854#ifndef RT_OS_WINDOWS
1855int slirp_get_nsock(PNATState pData)
1856{
1857 return pData->nsock;
1858}
1859#endif
1860
1861/*
 1862 * This function is called from the NAT thread.
1863 */
1864void slirp_post_sent(PNATState pData, void *pvArg)
1865{
1866 struct socket *so = 0;
1867 struct tcpcb *tp = 0;
1868 struct mbuf *m = (struct mbuf *)pvArg;
1869 m_free(pData, m);
1870}
1871#ifdef VBOX_WITH_SLIRP_MT
1872void slirp_process_queue(PNATState pData)
1873{
1874 RTReqProcess(pData->pReqQueue, RT_INDEFINITE_WAIT);
1875}
1876void *slirp_get_queue(PNATState pData)
1877{
1878 return pData->pReqQueue;
1879}
1880#endif
1881
1882void slirp_set_dhcp_TFTP_prefix(PNATState pData, const char *tftpPrefix)
1883{
1884 Log2(("tftp_prefix:%s\n", tftpPrefix));
1885 tftp_prefix = tftpPrefix;
1886}
1887
1888void slirp_set_dhcp_TFTP_bootfile(PNATState pData, const char *bootFile)
1889{
1890 Log2(("bootFile:%s\n", bootFile));
1891 bootp_filename = bootFile;
1892}
1893
1894void slirp_set_dhcp_next_server(PNATState pData, const char *next_server)
1895{
1896 Log2(("next_server:%s\n", next_server));
1897 if (next_server == NULL)
1898 pData->tftp_server.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_TFTP);
1899 else
1900 inet_aton(next_server, &pData->tftp_server);
1901}
1902
1903int slirp_set_binding_address(PNATState pData, char *addr)
1904{
1905 if (addr == NULL || (inet_aton(addr, &pData->bindIP) == 0))
1906 {
1907 pData->bindIP.s_addr = INADDR_ANY;
1908 return 1;
1909 }
1910 return 0;
1911}
1912
1913void slirp_set_dhcp_dns_proxy(PNATState pData, bool fDNSProxy)
1914{
1915 if (!pData->use_host_resolver)
1916 {
1917 Log2(("NAT: DNS proxy switched %s\n", (fDNSProxy ? "on" : "off")));
1918 pData->use_dns_proxy = fDNSProxy;
1919 }
1920 else
 1921        LogRel(("NAT: the host resolver conflicts with the DNS proxy; the DNS proxy setting has been ignored\n"));
1922}
1923
1924#define CHECK_ARG(name, val, lim_min, lim_max) \
1925 do { \
1926 if ((val) < (lim_min) || (val) > (lim_max)) \
1927 { \
1928 LogRel(("NAT: (" #name ":%d) has been ignored, " \
1929 "because out of range (%d, %d)\n", (val), (lim_min), (lim_max))); \
1930 return; \
1931 } \
1932 else \
1933 LogRel(("NAT: (" #name ":%d)\n", (val))); \
1934 } while (0)
1935
1936/* Don't allow the user to set values below 8 kB or above 1 MB. */
1937#define _8K_1M_CHECK_ARG(name, val) CHECK_ARG(name, (val), 8, 1024)
1938void slirp_set_rcvbuf(PNATState pData, int kilobytes)
1939{
1940 _8K_1M_CHECK_ARG("SOCKET_RCVBUF", kilobytes);
 1941    pData->socket_rcv = kilobytes * _1K;
1942}
1943void slirp_set_sndbuf(PNATState pData, int kilobytes)
1944{
1945 _8K_1M_CHECK_ARG("SOCKET_SNDBUF", kilobytes);
1946 pData->socket_snd = kilobytes * _1K;
1947}
1948void slirp_set_tcp_rcvspace(PNATState pData, int kilobytes)
1949{
1950 _8K_1M_CHECK_ARG("TCP_RCVSPACE", kilobytes);
1951 tcp_rcvspace = kilobytes * _1K;
1952}
1953void slirp_set_tcp_sndspace(PNATState pData, int kilobytes)
1954{
1955 _8K_1M_CHECK_ARG("TCP_SNDSPACE", kilobytes);
1956 tcp_sndspace = kilobytes * _1K;
1957}
1958
1959/*
 1960 * Look up the Ethernet address for an IP in the ARP cache.
 1961 * Note: it is the caller's responsibility to allocate the buffer for the result.
 1962 * @returns IPRT status code
1963 */
1964int slirp_arp_lookup_ether_by_ip(PNATState pData, uint32_t ip, uint8_t *ether)
1965{
1966 struct arp_cache_entry *ac;
1967
1968 if (ether == NULL)
1969 return VERR_INVALID_PARAMETER;
1970
1971 if (LIST_EMPTY(&pData->arp_cache))
1972 return VERR_NOT_FOUND;
1973
1974 LIST_FOREACH(ac, &pData->arp_cache, list)
1975 {
1976 if (ac->ip == ip)
1977 {
1978 memcpy(ether, ac->ether, ETH_ALEN);
1979 return VINF_SUCCESS;
1980 }
1981 }
1982 return VERR_NOT_FOUND;
1983}
1984
1985/*
 1986 * Look up the IP for an Ethernet address in the ARP cache.
 1987 * Note: it is the caller's responsibility to allocate the buffer for the result.
 1988 * @returns VINF_SUCCESS if found, VERR_NOT_FOUND otherwise.
1989 */
1990int slirp_arp_lookup_ip_by_ether(PNATState pData, const uint8_t *ether, uint32_t *ip)
1991{
1992 struct arp_cache_entry *ac;
1993 *ip = INADDR_ANY;
1994
1995 if (LIST_EMPTY(&pData->arp_cache))
1996 return VERR_NOT_FOUND;
1997
1998 LIST_FOREACH(ac, &pData->arp_cache, list)
1999 {
2000 if (memcmp(ether, ac->ether, ETH_ALEN) == 0)
2001 {
2002 *ip = ac->ip;
2003 return VINF_SUCCESS;
2004 }
2005 }
2006 return VERR_NOT_FOUND;
2007}
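
/*
 * A small usage sketch for the two ARP cache lookups above; the address is
 * hypothetical and, as noted, the caller provides the result buffers.
 */
#if 0 /* illustrative sketch only */
    uint8_t        au8Mac[ETH_ALEN];
    uint32_t       u32Ip;
    struct in_addr addr;
    inet_aton("10.0.2.15", &addr);
    if (RT_SUCCESS(slirp_arp_lookup_ether_by_ip(pData, addr.s_addr, au8Mac)))
        Log(("guest %R[IP4] is at %R[ether]\n", &addr, au8Mac));
    if (RT_SUCCESS(slirp_arp_lookup_ip_by_ether(pData, au8Mac, &u32Ip)))
        Log(("reverse lookup gives %R[IP4]\n", &u32Ip));
#endif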
2008
2009void slirp_arp_who_has(PNATState pData, uint32_t dst)
2010{
2011 struct mbuf *m;
2012 struct ethhdr *ehdr;
2013 struct arphdr *ahdr;
2014
2015#ifndef VBOX_WITH_SLIRP_BSD_MBUF
2016 m = m_get(pData);
2017#else
2018 m = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
2019#endif
2020 if (m == NULL)
2021 {
2022 LogRel(("NAT: Can't alloc mbuf for ARP request\n"));
2023 return;
2024 }
2025 ehdr = mtod(m, struct ethhdr *);
2026 memset(ehdr->h_source, 0xff, ETH_ALEN);
2027 ahdr = (struct arphdr *)&ehdr[1];
2028 ahdr->ar_hrd = RT_H2N_U16_C(1);
2029 ahdr->ar_pro = RT_H2N_U16_C(ETH_P_IP);
2030 ahdr->ar_hln = ETH_ALEN;
2031 ahdr->ar_pln = 4;
2032 ahdr->ar_op = RT_H2N_U16_C(ARPOP_REQUEST);
2033 memcpy(ahdr->ar_sha, special_ethaddr, ETH_ALEN);
2034 *(uint32_t *)ahdr->ar_sip = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
2035 memset(ahdr->ar_tha, 0xff, ETH_ALEN); /*broadcast*/
2036 *(uint32_t *)ahdr->ar_tip = dst;
2037#ifndef VBOX_WITH_SLIRP_BSD_MBUF
2038 m->m_data += if_maxlinkhdr;
2039 m->m_len = sizeof(struct arphdr);
2040#else
 2041    /* warning: this must fit within the minimal mbuf size */
2042 m->m_len = sizeof(struct arphdr) + ETH_HLEN;
2043 m->m_data += ETH_HLEN;
2044 m->m_len -= ETH_HLEN;
2045#endif
2046 if_encap(pData, ETH_P_ARP, m, ETH_ENCAP_URG);
2047}
2048
2049int slirp_arp_cache_update_or_add(PNATState pData, uint32_t dst, const uint8_t *mac)
2050{
2051 if (slirp_arp_cache_update(pData, dst, mac))
2052 slirp_arp_cache_add(pData, dst, mac);
2053
2054 return 0;
2055}
2056
2057/* Updates the ARP cache.
 2058 * @returns 0 if the entry was found and updated,
 2059 *          1 if it wasn't found.
2060 */
2061int slirp_arp_cache_update(PNATState pData, uint32_t dst, const uint8_t *mac)
2062{
2063 struct arp_cache_entry *ac;
2064 LIST_FOREACH(ac, &pData->arp_cache, list)
2065 {
2066 if (memcmp(ac->ether, mac, ETH_ALEN) == 0)
2067 {
2068 ac->ip = dst;
2069 return 0;
2070 }
2071 }
2072 return 1;
2073}
2074
2075void slirp_arp_cache_add(PNATState pData, uint32_t ip, const uint8_t *ether)
2076{
2077 struct arp_cache_entry *ac = NULL;
2078 ac = RTMemAllocZ(sizeof(struct arp_cache_entry));
2079 if (ac == NULL)
2080 {
2081 LogRel(("NAT: Can't allocate arp cache entry\n"));
2082 return;
2083 }
2084 ac->ip = ip;
2085 memcpy(ac->ether, ether, ETH_ALEN);
2086 LIST_INSERT_HEAD(&pData->arp_cache, ac, list);
2087}
2088
2089#ifdef VBOX_WITH_SLIRP_BSD_MBUF
2090void slirp_set_mtu(PNATState pData, int mtu)
2091{
2092 if (mtu < 20 || mtu >= 16000)
2093 {
 2094        LogRel(("NAT: MTU (%d) is out of the range [20, 16000); forcing MTU to 1500\n", mtu));
2095 mtu = 1500;
2096 }
2097 if_mtu =
2098 if_mru = mtu;
2099}
2100#endif