VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/slirp.c@ 28166

Last change on this file since 28166 was 28166, checked in by vboxsync, 15 years ago

NAT: fixed polling of the ICMP socket reducing the ICMP latency

1#include "slirp.h"
2#ifdef RT_OS_OS2
3# include <paths.h>
4#endif
5
6#include <VBox/err.h>
7#include <VBox/pdmdrv.h>
8#include <iprt/assert.h>
9#include <iprt/file.h>
10#ifndef RT_OS_WINDOWS
11# include <sys/ioctl.h>
12# include <poll.h>
13#else
14# include <Winnls.h>
15# define _WINSOCK2API_
16# include <IPHlpApi.h>
17#endif
18#include <alias.h>
19
20#ifndef RT_OS_WINDOWS
21
22# define DO_ENGAGE_EVENT1(so, fdset, label) \
23 do { \
24 if ( so->so_poll_index != -1 \
25 && so->s == polls[so->so_poll_index].fd) \
26 { \
27 polls[so->so_poll_index].events |= N_(fdset ## _poll); \
28 break; \
29 } \
30 AssertRelease(poll_index < (nfds)); \
31 AssertRelease(poll_index >= 0 && poll_index < (nfds)); \
32 polls[poll_index].fd = (so)->s; \
33 (so)->so_poll_index = poll_index; \
34 polls[poll_index].events = N_(fdset ## _poll); \
35 polls[poll_index].revents = 0; \
36 poll_index++; \
37 } while (0)
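/*
 * A note on the poll(2) backend above: each socket caches the index of its
 * slot in the caller supplied pollfd array in so_poll_index.  The first
 * engagement during a fill pass claims the next free slot; subsequent
 * engagements for the same socket only OR in additional event bits.  As an
 * illustrative sketch (not the literal expansion), DO_ENGAGE_EVENT1(so,
 * readfds, tcp) behaves roughly like:
 *
 *   if (so->so_poll_index != -1 && so->s == polls[so->so_poll_index].fd)
 *       polls[so->so_poll_index].events |= readfds_poll;
 *   else
 *   {
 *       polls[poll_index].fd      = so->s;
 *       so->so_poll_index         = poll_index;
 *       polls[poll_index].events  = readfds_poll;
 *       polls[poll_index].revents = 0;
 *       poll_index++;
 *   }
 */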
38
39# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
40 do { \
41 if ( so->so_poll_index != -1 \
42 && so->s == polls[so->so_poll_index].fd) \
43 { \
44 polls[so->so_poll_index].events |= \
45 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
46 break; \
47 } \
48 AssertRelease(poll_index < (nfds)); \
49 polls[poll_index].fd = (so)->s; \
50 (so)->so_poll_index = poll_index; \
51 polls[poll_index].events = \
52 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
53 poll_index++; \
54 } while (0)
55
56# define DO_POLL_EVENTS(rc, error, so, events, label) do {} while (0)
57
58#if 0/** @todo This doesn't work because Linux sets both POLLHUP and POLLERR when the
59socket is closed. @bugref{4811} Please verify the changed test. */
60# define DO_CHECK_FD_SET(so, events, fdset) \
61 ( ((so)->so_poll_index != -1) \
62 && ((so)->so_poll_index <= ndfs) \
63 && ((so)->s == polls[so->so_poll_index].fd) \
64 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)) \
65 && !(polls[(so)->so_poll_index].revents & (POLLERR|POLLNVAL)))
66#else
67# define DO_CHECK_FD_SET(so, events, fdset) \
68 ( ((so)->so_poll_index != -1) \
69 && ((so)->so_poll_index <= ndfs) \
70 && ((so)->s == polls[so->so_poll_index].fd) \
71 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)) \
72 && !(polls[(so)->so_poll_index].revents & POLLNVAL))
73#endif
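/*
 * DO_CHECK_FD_SET above answers "did the event named by 'fdset' fire for this
 * socket?" on the poll(2) backend: the socket must still own a valid slot
 * pointing at its own descriptor, the requested bit must be present in
 * revents, and the descriptor must not be invalid (POLLNVAL).  The disabled
 * variant also rejects POLLERR, but as the @todo notes, Linux raises POLLHUP
 * together with POLLERR when the socket is closed, which could hide the last
 * readable data.
 */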
74
75 /* specific for Unix API */
76# define DO_UNIX_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
77 /* specific for Windows Winsock API */
78# define DO_WIN_CHECK_FD_SET(so, events, fdset) 0
79
80# ifndef RT_OS_LINUX
81# define readfds_poll (POLLRDNORM)
82# define writefds_poll (POLLWRNORM)
83# else
84# define readfds_poll (POLLIN)
85# define writefds_poll (POLLOUT)
86# endif
87# define xfds_poll (POLLPRI)
88# define closefds_poll (POLLHUP)
89# define rderr_poll (POLLERR)
90# define rdhup_poll (POLLHUP)
91# define nval_poll (POLLNVAL)
92
93# define ICMP_ENGAGE_EVENT(so, fdset) \
94 do { \
95 if (pData->icmp_socket.s != -1) \
96 DO_ENGAGE_EVENT1((so), fdset, ICMP); \
97 } while (0)
98
99#else /* RT_OS_WINDOWS */
100
101/*
102 * On Windows, we will be notified by IcmpSendEcho2() when the response arrives.
103 * So no call to WSAEventSelect is necessary.
104 */
105# define ICMP_ENGAGE_EVENT(so, fdset) do {} while (0)
106
107/*
108 * On Windows we use FD_ALL_EVENTS to ensure that we don't miss any event.
109 */
110# define DO_ENGAGE_EVENT1(so, fdset1, label) \
111 do { \
112 rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS); \
113 if (rc == SOCKET_ERROR) \
114 { \
115 /* This should not happen */ \
116 error = WSAGetLastError(); \
117 LogRel(("WSAEventSelect (" #label ") error %d (so=%x, socket=%s, event=%x)\n", \
118 error, (so), (so)->s, VBOX_SOCKET_EVENT)); \
119 } \
120 } while (0); \
121 CONTINUE(label)
122
123# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
124 DO_ENGAGE_EVENT1((so), (fdset1), label)
125
126# define DO_POLL_EVENTS(rc, error, so, events, label) \
127 (rc) = WSAEnumNetworkEvents((so)->s, VBOX_SOCKET_EVENT, (events)); \
128 if ((rc) == SOCKET_ERROR) \
129 { \
130 (error) = WSAGetLastError(); \
131 LogRel(("WSAEnumNetworkEvents " #label " error %d\n", (error))); \
132 CONTINUE(label); \
133 }
134
135# define acceptds_win FD_ACCEPT
136# define acceptds_win_bit FD_ACCEPT_BIT
137# define readfds_win FD_READ
138# define readfds_win_bit FD_READ_BIT
139# define writefds_win FD_WRITE
140# define writefds_win_bit FD_WRITE_BIT
141# define xfds_win FD_OOB
142# define xfds_win_bit FD_OOB_BIT
143# define closefds_win FD_CLOSE
144# define closefds_win_bit FD_CLOSE_BIT
145
146# define closefds_win FD_CLOSE
147# define closefds_win_bit FD_CLOSE_BIT
148
149# define DO_CHECK_FD_SET(so, events, fdset) \
150 (((events).lNetworkEvents & fdset ## _win) && ((events).iErrorCode[fdset ## _win_bit] == 0))
151
152# define DO_WIN_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
153# define DO_UNIX_CHECK_FD_SET(so, events, fdset) 1 /*specific for Unix API */
154
155#endif /* RT_OS_WINDOWS */
156
157#define TCP_ENGAGE_EVENT1(so, fdset) \
158 DO_ENGAGE_EVENT1((so), fdset, tcp)
159
160#define TCP_ENGAGE_EVENT2(so, fdset1, fdset2) \
161 DO_ENGAGE_EVENT2((so), fdset1, fdset2, tcp)
162
163#define UDP_ENGAGE_EVENT(so, fdset) \
164 DO_ENGAGE_EVENT1((so), fdset, udp)
165
166#define POLL_TCP_EVENTS(rc, error, so, events) \
167 DO_POLL_EVENTS((rc), (error), (so), (events), tcp)
168
169#define POLL_UDP_EVENTS(rc, error, so, events) \
170 DO_POLL_EVENTS((rc), (error), (so), (events), udp)
171
172#define CHECK_FD_SET(so, events, set) \
173 (DO_CHECK_FD_SET((so), (events), set))
174
175#define WIN_CHECK_FD_SET(so, events, set) \
176 (DO_WIN_CHECK_FD_SET((so), (events), set))
177
178#define UNIX_CHECK_FD_SET(so, events, set) \
179 (DO_UNIX_CHECK_FD_SET(so, events, set))
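/*
 * The TCP_*, UDP_*, POLL_* and *_CHECK_FD_SET wrappers above are the only
 * names used by the socket scanning loops further down; they expand to the
 * poll(2) based macros on Unix hosts and to the WSAEventSelect() /
 * WSAEnumNetworkEvents() based ones on Windows, keeping slirp_select_fill()
 * and slirp_select_poll() platform neutral.
 */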
180
181/*
182 * Logging macros
183 */
184#if VBOX_WITH_DEBUG_NAT_SOCKETS
185# if defined(RT_OS_WINDOWS)
186# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
187 do { \
188 LogRel((" " #proto " %R[natsock] %R[natwinnetevents]\n", (so), (winevent))); \
189 } while (0)
190# else /* !RT_OS_WINDOWS */
191# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
192 do { \
193 LogRel((" " #proto " %R[natsock] %s %s %s er: %s, %s, %s\n", (so), \
194 CHECK_FD_SET(so, ign ,r_fdset) ? "READ":"", \
195 CHECK_FD_SET(so, ign, w_fdset) ? "WRITE":"", \
196 CHECK_FD_SET(so, ign, x_fdset) ? "OOB":"", \
197 CHECK_FD_SET(so, ign, rderr) ? "RDERR":"", \
198 CHECK_FD_SET(so, ign, rdhup) ? "RDHUP":"", \
199 CHECK_FD_SET(so, ign, nval) ? "RDNVAL":"")); \
200 } while (0)
201# endif /* !RT_OS_WINDOWS */
202#else /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
203# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) do {} while (0)
204#endif /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
205
206#define LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
207 DO_LOG_NAT_SOCK((so), proto, (winevent), r_fdset, w_fdset, x_fdset)
208
209static void activate_port_forwarding(PNATState, const uint8_t *pEther);
210
211static const uint8_t special_ethaddr[6] =
212{
213 0x52, 0x54, 0x00, 0x12, 0x35, 0x00
214};
215
216static const uint8_t broadcast_ethaddr[6] =
217{
218 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
219};
220
221const uint8_t zerro_ethaddr[6] =
222{
223 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
224};
225
226#ifdef RT_OS_WINDOWS
227static int get_dns_addr_domain(PNATState pData, bool fVerbose,
228 struct in_addr *pdns_addr,
229 const char **ppszDomain)
230{
231 ULONG flags = GAA_FLAG_INCLUDE_PREFIX; /*GAA_FLAG_INCLUDE_ALL_INTERFACES;*/ /* all interfaces registered in NDIS */
232 PIP_ADAPTER_ADDRESSES pAdapterAddr = NULL;
233 PIP_ADAPTER_ADDRESSES pAddr = NULL;
234 PIP_ADAPTER_DNS_SERVER_ADDRESS pDnsAddr = NULL;
235 ULONG size;
236 int wlen = 0;
237 char *pszSuffix;
238 struct dns_domain_entry *pDomain = NULL;
239 ULONG ret = ERROR_SUCCESS;
240
241 /* @todo add the GAA_FLAG_SKIP_* flags so that only the required information is returned */
242
243 /* determine size of buffer */
244 size = 0;
245 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
246 if (ret != ERROR_BUFFER_OVERFLOW)
247 {
248 LogRel(("NAT: error %lu occurred on capacity detection operation\n", ret));
249 return -1;
250 }
251 if (size == 0)
252 {
253 LogRel(("NAT: Win socket API returns non capacity\n"));
254 return -1;
255 }
256
257 pAdapterAddr = RTMemAllocZ(size);
258 if (!pAdapterAddr)
259 {
260 LogRel(("NAT: No memory available \n"));
261 return -1;
262 }
263 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
264 if (ret != ERROR_SUCCESS)
265 {
266 LogRel(("NAT: error %lu occurred on fetching adapters info\n", ret));
267 RTMemFree(pAdapterAddr);
268 return -1;
269 }
270
271 for (pAddr = pAdapterAddr; pAddr != NULL; pAddr = pAddr->Next)
272 {
273 int found;
274 if (pAddr->OperStatus != IfOperStatusUp)
275 continue;
276
277 for (pDnsAddr = pAddr->FirstDnsServerAddress; pDnsAddr != NULL; pDnsAddr = pDnsAddr->Next)
278 {
279 struct sockaddr *SockAddr = pDnsAddr->Address.lpSockaddr;
280 struct in_addr InAddr;
281 struct dns_entry *pDns;
282
283 if (SockAddr->sa_family != AF_INET)
284 continue;
285
286 InAddr = ((struct sockaddr_in *)SockAddr)->sin_addr;
287
288 /* add dns server to list */
289 pDns = RTMemAllocZ(sizeof(struct dns_entry));
290 if (!pDns)
291 {
292 LogRel(("NAT: Can't allocate buffer for DNS entry\n"));
293 RTMemFree(pAdapterAddr);
294 return VERR_NO_MEMORY;
295 }
296
297 LogRel(("NAT: adding %R[IP4] to DNS server list\n", &InAddr));
298 if ((InAddr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
299 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
300 else
301 pDns->de_addr.s_addr = InAddr.s_addr;
302
303 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
304
305 if (pAddr->DnsSuffix == NULL)
306 continue;
307
308 /* keep the domain suffix list unique */
309 RTUtf16ToUtf8(pAddr->DnsSuffix, &pszSuffix);
310 if (!pszSuffix || strlen(pszSuffix) == 0)
311 {
312 RTStrFree(pszSuffix);
313 continue;
314 }
315
316 found = 0;
317 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
318 {
319 if ( pDomain->dd_pszDomain != NULL
320 && strcmp(pDomain->dd_pszDomain, pszSuffix) == 0)
321 {
322 found = 1;
323 RTStrFree(pszSuffix);
324 break;
325 }
326 }
327 if (!found)
328 {
329 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
330 if (!pDomain)
331 {
332 LogRel(("NAT: not enough memory\n"));
333 RTStrFree(pszSuffix);
334 RTMemFree(pAdapterAddr);
335 return VERR_NO_MEMORY;
336 }
337 pDomain->dd_pszDomain = pszSuffix;
338 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
339 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
340 }
341 }
342 }
343 RTMemFree(pAdapterAddr);
344 return 0;
345}
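/*
 * Summary of the Windows variant above: pfGetAdaptersAddresses() is called
 * twice, first with a zero-sized buffer to learn the required size (this is
 * expected to fail with ERROR_BUFFER_OVERFLOW), then with a properly sized
 * allocation.  Every IPv4 DNS server of every interface that is up goes into
 * pDnsList, and each adapter's DNS suffix is added to pDomainList at most
 * once.  A DNS server on the host loopback network would be unreachable from
 * the guest, so it is remapped onto the NAT alias address, roughly:
 *
 *   pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
 *
 * which with the default 10.0.2.0/24 layout points the guest at the built-in
 * gateway/alias address instead of 127.x.x.x.
 */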
346
347#else /* !RT_OS_WINDOWS */
348
349static int RTFileGets(RTFILE File, void *pvBuf, size_t cbBufSize, size_t *pcbRead)
350{
351 size_t cbRead;
352 char bTest;
353 int rc = VERR_NO_MEMORY;
354 char *pu8Buf = (char *)pvBuf;
355 *pcbRead = 0;
356
357 while ( RT_SUCCESS(rc = RTFileRead(File, &bTest, 1, &cbRead))
358 && (pu8Buf - (char *)pvBuf) < cbBufSize)
359 {
360 if (cbRead == 0)
361 return VERR_EOF;
362
363 if (bTest == '\r' || bTest == '\n')
364 {
365 *pu8Buf = 0;
366 return VINF_SUCCESS;
367 }
368 *pu8Buf = bTest;
369 pu8Buf++;
370 (*pcbRead)++;
371 }
372 return rc;
373}
374
375static int get_dns_addr_domain(PNATState pData, bool fVerbose,
376 struct in_addr *pdns_addr,
377 const char **ppszDomain)
378{
379 char buff[512];
380 char buff2[256];
381 RTFILE f;
382 int fFoundNameserver = 0;
383 struct in_addr tmp_addr;
384 int rc = VERR_FILE_NOT_FOUND; /* keep rc defined on the OS/2 path when ETC is not set */
385 size_t bytes;
386
387# ifdef RT_OS_OS2
388 /* Try various locations. */
389 char *etc = getenv("ETC");
390 if (etc)
391 {
392 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", etc);
393 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
394 }
395 if (RT_FAILURE(rc))
396 {
397 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", _PATH_ETC);
398 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
399 }
400 if (RT_FAILURE(rc))
401 {
402 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", _PATH_ETC);
403 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
404 }
405# else /* !RT_OS_OS2 */
406# ifndef DEBUG_vvl
407 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
408# else
409 char *home = getenv("HOME");
410 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", home);
411 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
412 if (RT_SUCCESS(rc))
413 {
414 Log(("NAT: DNS we're using %s\n", buff));
415 }
416 else
417 {
418 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
419 Log(("NAT: DNS we're using %s\n", buff));
420 }
421# endif
422# endif /* !RT_OS_OS2 */
423 if (RT_FAILURE(rc))
424 return -1;
425
426 if (ppszDomain)
427 *ppszDomain = NULL;
428
429 Log(("NAT: DNS Servers:\n"));
430 while ( RT_SUCCESS(rc = RTFileGets(f, buff, 512, &bytes))
431 && rc != VERR_EOF)
432 {
433 struct dns_entry *pDns = NULL;
434 if (sscanf(buff, "nameserver%*[ \t]%256s", buff2) == 1)
435 {
436 if (!inet_aton(buff2, &tmp_addr))
437 continue;
438
439 /* localhost mask */
440 pDns = RTMemAllocZ(sizeof (struct dns_entry));
441 if (!pDns)
442 {
443 LogRel(("can't alloc memory for DNS entry\n"));
444 return -1;
445 }
446
447 /* check */
448 pDns->de_addr.s_addr = tmp_addr.s_addr;
449 if ((pDns->de_addr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
450 {
451 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
452 }
453 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
454 fFoundNameserver++;
455 }
456 if ((!strncmp(buff, "domain", 6) || !strncmp(buff, "search", 6)))
457 {
458 char *tok;
459 char *saveptr;
460 struct dns_domain_entry *pDomain = NULL;
461 int fFoundDomain = 0;
462 tok = strtok_r(&buff[6], " \t\n", &saveptr);
463 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
464 {
465 if ( tok != NULL
466 && strcmp(tok, pDomain->dd_pszDomain) == 0)
467 {
468 fFoundDomain = 1;
469 break;
470 }
471 }
472 if (tok != NULL && !fFoundDomain)
473 {
474 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
475 if (!pDomain)
476 {
477 LogRel(("NAT: not enought memory to add domain list\n"));
478 return VERR_NO_MEMORY;
479 }
480 pDomain->dd_pszDomain = RTStrDup(tok);
481 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
482 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
483 }
484 }
485 }
486 RTFileClose(f);
487 if (!fFoundNameserver)
488 return -1;
489 return 0;
490}
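/*
 * The Unix variant above parses the host's resolv.conf line by line:
 * "nameserver" entries become dns_entry records (loopback servers are
 * remapped to the NAT alias address, just like on Windows), and the first
 * token of a "domain" or "search" line becomes a dns_domain_entry, added only
 * if it is not in the list yet.  For a hypothetical host file such as
 *
 *   nameserver 127.0.0.1
 *   nameserver 192.168.1.1
 *   search example.org
 *
 * the guest would be handed the alias address plus 192.168.1.1 as DNS servers
 * and "example.org" as its search domain.
 */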
491
492#endif /* !RT_OS_WINDOWS */
493
494static int slirp_init_dns_list(PNATState pData)
495{
496 TAILQ_INIT(&pData->pDnsList);
497 LIST_INIT(&pData->pDomainList);
498 return get_dns_addr_domain(pData, true, NULL, NULL);
499}
500
501static void slirp_release_dns_list(PNATState pData)
502{
503 struct dns_entry *pDns = NULL;
504 struct dns_domain_entry *pDomain = NULL;
505
506 while (!TAILQ_EMPTY(&pData->pDnsList))
507 {
508 pDns = TAILQ_FIRST(&pData->pDnsList);
509 TAILQ_REMOVE(&pData->pDnsList, pDns, de_list);
510 RTMemFree(pDns);
511 }
512
513 while (!LIST_EMPTY(&pData->pDomainList))
514 {
515 pDomain = LIST_FIRST(&pData->pDomainList);
516 LIST_REMOVE(pDomain, dd_list);
517 if (pDomain->dd_pszDomain != NULL)
518 RTStrFree(pDomain->dd_pszDomain);
519 RTMemFree(pDomain);
520 }
521}
522
523int get_dns_addr(PNATState pData, struct in_addr *pdns_addr)
524{
525 return get_dns_addr_domain(pData, false, pdns_addr, NULL);
526}
527
528#ifndef VBOX_WITH_NAT_SERVICE
529int slirp_init(PNATState *ppData, const char *pszNetAddr, uint32_t u32Netmask,
530 bool fPassDomain, bool fUseHostResolver, void *pvUser)
531#else
532int slirp_init(PNATState *ppData, uint32_t u32NetAddr, uint32_t u32Netmask,
533 bool fPassDomain, bool fUseHostResolver, void *pvUser)
534#endif
535{
536 int fNATfailed = 0;
537 int rc;
538 PNATState pData = RTMemAllocZ(sizeof(NATState));
539 *ppData = pData;
540 if (!pData)
541 return VERR_NO_MEMORY;
542 if (u32Netmask & 0x1f)
543 /* CTL is x.x.x.15, bootp passes up to 16 IPs (15..31) */
544 return VERR_INVALID_PARAMETER;
545 pData->fPassDomain = !fUseHostResolver ? fPassDomain : false;
546 pData->use_host_resolver = fUseHostResolver;
547 pData->pvUser = pvUser;
548 pData->netmask = u32Netmask;
549
550 /* sockets & TCP defaults */
551 pData->socket_rcv = 64 * _1K;
552 pData->socket_snd = 64 * _1K;
553 tcp_sndspace = 64 * _1K;
554 tcp_rcvspace = 64 * _1K;
555
556#ifdef RT_OS_WINDOWS
557 {
558 WSADATA Data;
559 WSAStartup(MAKEWORD(2, 0), &Data);
560 }
561 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
562#endif
563#ifdef VBOX_WITH_SLIRP_MT
564 QSOCKET_LOCK_CREATE(tcb);
565 QSOCKET_LOCK_CREATE(udb);
566 rc = RTReqCreateQueue(&pData->pReqQueue);
567 AssertReleaseRC(rc);
568#endif
569
570 link_up = 1;
571
572 rc = bootp_dhcp_init(pData);
573 if (rc != 0)
574 {
575 LogRel(("NAT: DHCP server initialization was failed\n"));
576 return VINF_NAT_DNS;
577 }
578 debug_init();
579 if_init(pData);
580 ip_init(pData);
581 icmp_init(pData);
582
583 /* Initialise mbufs *after* setting the MTU */
584#ifndef VBOX_WITH_SLIRP_BSD_MBUF
585 m_init(pData);
586#else
587 mbuf_init(pData);
588#endif
589
590#ifndef VBOX_WITH_NAT_SERVICE
591 inet_aton(pszNetAddr, &pData->special_addr);
592#else
593 pData->special_addr.s_addr = u32NetAddr;
594#endif
595 pData->slirp_ethaddr = &special_ethaddr[0];
596 alias_addr.s_addr = pData->special_addr.s_addr | RT_H2N_U32_C(CTL_ALIAS);
597 /* @todo: add the ability to configure this stuff */
598
599 /* set default addresses */
600 inet_aton("127.0.0.1", &loopback_addr);
601 if (!pData->use_host_resolver)
602 {
603 if (slirp_init_dns_list(pData) < 0)
604 fNATfailed = 1;
605
606 dnsproxy_init(pData);
607 }
608
609 getouraddr(pData);
610 {
611 int flags = 0;
612 struct in_addr proxy_addr;
613 pData->proxy_alias = LibAliasInit(pData, NULL);
614 if (pData->proxy_alias == NULL)
615 {
616 LogRel(("NAT: LibAlias default rule wasn't initialized\n"));
617 AssertMsgFailed(("NAT: LibAlias default rule wasn't initialized\n"));
618 }
619 flags = LibAliasSetMode(pData->proxy_alias, 0, 0);
620#ifndef NO_FW_PUNCH
621 flags |= PKT_ALIAS_PUNCH_FW;
622#endif
623 flags |= PKT_ALIAS_LOG; /* set logging */
624 flags = LibAliasSetMode(pData->proxy_alias, flags, ~0);
625 proxy_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
626 LibAliasSetAddress(pData->proxy_alias, proxy_addr);
627 ftp_alias_load(pData);
628 nbt_alias_load(pData);
629 if (pData->use_host_resolver)
630 dns_alias_load(pData);
631 }
632 return fNATfailed ? VINF_NAT_DNS : VINF_SUCCESS;
633}
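/*
 * Ordering above matters: BOOTP/DHCP, interface, IP and ICMP state are set up
 * before the mbuf pool (which depends on the MTU), then the guest network
 * layout is derived from pszNetAddr/u32NetAddr and u32Netmask.  With the
 * usual 10.0.2.0/24 configuration the low address bits select the built-in
 * services, i.e. (assuming the common CTL_* assignments) something like:
 *
 *   special_addr | CTL_ALIAS  ->  10.0.2.2   (gateway / alias address)
 *   special_addr | CTL_DNS    ->  10.0.2.3   (DNS)
 *   special_addr | CTL_TFTP   ->  10.0.2.4   (TFTP / next server)
 *
 * Name resolution is either proxied (dnsproxy_init) or left to the host
 * resolver, and a libalias instance bound to the alias address performs the
 * actual translation, with the ftp/nbt (and optionally dns) handlers loaded
 * on top.
 */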
634
635/**
636 * Register statistics.
637 */
638void slirp_register_statistics(PNATState pData, PPDMDRVINS pDrvIns)
639{
640#ifdef VBOX_WITH_STATISTICS
641# define PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
642# define COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
643# include "counters.h"
644# undef COUNTER
645/** @todo register statistics for the variables dumped by:
646 * ipstats(pData); tcpstats(pData); udpstats(pData); icmpstats(pData);
647 * mbufstats(pData); sockstats(pData); */
648#endif /* VBOX_WITH_STATISTICS */
649}
650
651/**
652 * Deregister statistics.
653 */
654void slirp_deregister_statistics(PNATState pData, PPDMDRVINS pDrvIns)
655{
656 if (pData == NULL)
657 return;
658#ifdef VBOX_WITH_STATISTICS
659# define PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
660# define COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
661# include "counters.h"
662#endif /* VBOX_WITH_STATISTICS */
663}
664
665/**
666 * Marks the link as up, making it possible to establish new connections.
667 */
668void slirp_link_up(PNATState pData)
669{
670 struct arp_cache_entry *ac;
671 link_up = 1;
672
673 if (LIST_EMPTY(&pData->arp_cache))
674 return;
675
676 LIST_FOREACH(ac, &pData->arp_cache, list)
677 {
678 activate_port_forwarding(pData, ac->ether);
679 }
680}
681
682/**
683 * Marks the link as down and cleans up the current connections.
684 */
685void slirp_link_down(PNATState pData)
686{
687 struct socket *so;
688 struct port_forward_rule *rule;
689
690 while ((so = tcb.so_next) != &tcb)
691 {
692 if (so->so_state & SS_NOFDREF || so->s == -1)
693 sofree(pData, so);
694 else
695 tcp_drop(pData, sototcpcb(so), 0);
696 }
697
698 while ((so = udb.so_next) != &udb)
699 udp_detach(pData, so);
700
701 /*
702 * Clear the active state of port-forwarding rules to force
703 * re-setup on restoration of communications.
704 */
705 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
706 {
707 rule->activated = 0;
708 }
709 pData->cRedirectionsActive = 0;
710
711 link_up = 0;
712}
713
714/**
715 * Terminates the slirp component.
716 */
717void slirp_term(PNATState pData)
718{
719 if (pData == NULL)
720 return;
721#ifdef RT_OS_WINDOWS
722 pData->pfIcmpCloseHandle(pData->icmp_socket.sh);
723 FreeLibrary(pData->hmIcmpLibrary);
724 RTMemFree(pData->pvIcmpBuffer);
725#else
726 closesocket(pData->icmp_socket.s);
727#endif
728
729 slirp_link_down(pData);
730 slirp_release_dns_list(pData);
731 ftp_alias_unload(pData);
732 nbt_alias_unload(pData);
733 if (pData->use_host_resolver)
734 dns_alias_unload(pData);
735 while (!LIST_EMPTY(&instancehead))
736 {
737 struct libalias *la = LIST_FIRST(&instancehead);
738 /* libalias do all clean up */
739 LibAliasUninit(la);
740 }
741 while (!LIST_EMPTY(&pData->arp_cache))
742 {
743 struct arp_cache_entry *ac = LIST_FIRST(&pData->arp_cache);
744 LIST_REMOVE(ac, list);
745 RTMemFree(ac);
746 }
747 bootp_dhcp_fini(pData);
748 m_fini(pData);
749#ifdef RT_OS_WINDOWS
750 WSACleanup();
751#endif
752#ifdef LOG_ENABLED
753 Log(("\n"
754 "NAT statistics\n"
755 "--------------\n"
756 "\n"));
757 ipstats(pData);
758 tcpstats(pData);
759 udpstats(pData);
760 icmpstats(pData);
761 mbufstats(pData);
762 sockstats(pData);
763 Log(("\n"
764 "\n"
765 "\n"));
766#endif
767 RTMemFree(pData);
768}
769
770
771#define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
772#define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
773
774/*
775 * curtime kept to an accuracy of 1ms
776 */
777static void updtime(PNATState pData)
778{
779#ifdef RT_OS_WINDOWS
780 struct _timeb tb;
781
782 _ftime(&tb);
783 curtime = (u_int)tb.time * (u_int)1000;
784 curtime += (u_int)tb.millitm;
785#else
786 gettimeofday(&tt, 0);
787
788 curtime = (u_int)tt.tv_sec * (u_int)1000;
789 curtime += (u_int)tt.tv_usec / (u_int)1000;
790
791 if ((tt.tv_usec % 1000) >= 500)
792 curtime++;
793#endif
794}
795
796#ifdef RT_OS_WINDOWS
797void slirp_select_fill(PNATState pData, int *pnfds)
798#else /* RT_OS_WINDOWS */
799void slirp_select_fill(PNATState pData, int *pnfds, struct pollfd *polls)
800#endif /* !RT_OS_WINDOWS */
801{
802 struct socket *so, *so_next;
803 int nfds;
804#if defined(RT_OS_WINDOWS)
805 int rc;
806 int error;
807#else
808 int poll_index = 0;
809#endif
810 int i;
811
812 STAM_PROFILE_START(&pData->StatFill, a);
813
814 nfds = *pnfds;
815
816 /*
817 * First, TCP sockets
818 */
819 do_slowtimo = 0;
820 if (!link_up)
821 goto done;
822
823 /*
824 * *_slowtimo needs calling if there are IP fragments
825 * in the fragment queue, or there are TCP connections active
826 */
827 /* XXX:
828 * triggering of fragment expiration should be the same but use the new macros
829 */
830 do_slowtimo = (tcb.so_next != &tcb);
831 if (!do_slowtimo)
832 {
833 for (i = 0; i < IPREASS_NHASH; i++)
834 {
835 if (!TAILQ_EMPTY(&ipq[i]))
836 {
837 do_slowtimo = 1;
838 break;
839 }
840 }
841 }
842 /* always add the ICMP socket */
843#ifndef RT_OS_WINDOWS
844 pData->icmp_socket.so_poll_index = -1;
845#endif
846 ICMP_ENGAGE_EVENT(&pData->icmp_socket, readfds);
847
848 STAM_COUNTER_RESET(&pData->StatTCP);
849 STAM_COUNTER_RESET(&pData->StatTCPHot);
850
851 QSOCKET_FOREACH(so, so_next, tcp)
852 /* { */
853#if !defined(RT_OS_WINDOWS)
854 so->so_poll_index = -1;
855#endif
856#ifndef VBOX_WITH_SLIRP_BSD_MBUF
857 if (pData->fmbuf_water_line == 1)
858 {
859 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
860 {
861 pData->fmbuf_water_warn_sent = 0;
862 pData->fmbuf_water_line = 0;
863 }
864# ifndef RT_OS_WINDOWS
865 poll_index = 0;
866# endif
867 goto done;
868 }
869#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
870 STAM_COUNTER_INC(&pData->StatTCP);
871
872 /*
873 * See if we need a tcp_fasttimo
874 */
875 if ( time_fasttimo == 0
876 && so->so_tcpcb != NULL
877 && so->so_tcpcb->t_flags & TF_DELACK)
878 {
879 time_fasttimo = curtime; /* Flag when we want a fasttimo */
880 }
881
882 /*
883 * NOFDREF can include sockets still connecting to localhost,
884 * newly socreate()'d sockets, etc. We don't want to select these.
885 */
886 if (so->so_state & SS_NOFDREF || so->s == -1)
887 CONTINUE(tcp);
888
889 /*
890 * Set for reading sockets which are accepting
891 */
892 if (so->so_state & SS_FACCEPTCONN)
893 {
894 STAM_COUNTER_INC(&pData->StatTCPHot);
895 TCP_ENGAGE_EVENT1(so, readfds);
896 CONTINUE(tcp);
897 }
898
899 /*
900 * Set for writing sockets which are connecting
901 */
902 if (so->so_state & SS_ISFCONNECTING)
903 {
904 Log2(("connecting %R[natsock] engaged\n",so));
905 STAM_COUNTER_INC(&pData->StatTCPHot);
906 TCP_ENGAGE_EVENT1(so, writefds);
907 }
908
909 /*
910 * Set for writing if we are connected, can send more, and
911 * we have something to send
912 */
913 if (CONN_CANFSEND(so) && so->so_rcv.sb_cc)
914 {
915 STAM_COUNTER_INC(&pData->StatTCPHot);
916 TCP_ENGAGE_EVENT1(so, writefds);
917 }
918
919 /*
920 * Set for reading (and urgent data) if we are connected, can
921 * receive more, and we have room for it XXX /2 ?
922 */
923 if (CONN_CANFRCV(so) && (so->so_snd.sb_cc < (so->so_snd.sb_datalen/2)))
924 {
925 STAM_COUNTER_INC(&pData->StatTCPHot);
926 TCP_ENGAGE_EVENT2(so, readfds, xfds);
927 }
928 LOOP_LABEL(tcp, so, so_next);
929 }
930
931 /*
932 * UDP sockets
933 */
934 STAM_COUNTER_RESET(&pData->StatUDP);
935 STAM_COUNTER_RESET(&pData->StatUDPHot);
936
937 QSOCKET_FOREACH(so, so_next, udp)
938 /* { */
939
940#ifndef VBOX_WITH_SLIRP_BSD_MBUF
941 if (pData->fmbuf_water_line == 1)
942 {
943 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
944 {
945 pData->fmbuf_water_line = 0;
946 pData->fmbuf_water_warn_sent = 0;
947 }
948# ifndef RT_OS_WINDOWS
949 poll_index = 0;
950# endif
951 goto done;
952 }
953#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
954 STAM_COUNTER_INC(&pData->StatUDP);
955#if !defined(RT_OS_WINDOWS)
956 so->so_poll_index = -1;
957#endif
958
959 /*
960 * See if it's timed out
961 */
962 if (so->so_expire)
963 {
964 if (so->so_expire <= curtime)
965 {
966 Log2(("NAT: %R[natsock] expired\n", so));
967 if (so->so_timeout != NULL)
968 {
969 so->so_timeout(pData, so, so->so_timeout_arg);
970 }
971#ifdef VBOX_WITH_SLIRP_MT
972 /* we need so_next to continue our loop */
973 so_next = so->so_next;
974#endif
975 UDP_DETACH(pData, so, so_next);
976 CONTINUE_NO_UNLOCK(udp);
977 }
978 else
979 {
980 do_slowtimo = 1; /* Let socket expire */
981 }
982 }
983
984 /*
985 * When UDP packets are received from over the link, they're
986 * sendto()'d straight away, so no need for setting for writing
987 * Limit the number of packets queued by this session to 4.
988 * Note that even though we try and limit this to 4 packets,
989 * the session could have more queued if the packets needed
990 * to be fragmented.
991 *
992 * (XXX <= 4 ?)
993 */
994 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4)
995 {
996 STAM_COUNTER_INC(&pData->StatUDPHot);
997 UDP_ENGAGE_EVENT(so, readfds);
998 }
999 LOOP_LABEL(udp, so, so_next);
1000 }
1001done:
1002
1003#if defined(RT_OS_WINDOWS)
1004 *pnfds = VBOX_EVENT_COUNT;
1005#else /* RT_OS_WINDOWS */
1006 AssertRelease(poll_index <= *pnfds);
1007 *pnfds = poll_index;
1008#endif /* !RT_OS_WINDOWS */
1009
1010 STAM_PROFILE_STOP(&pData->StatFill, a);
1011}
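/*
 * slirp_select_fill() and slirp_select_poll() are intended to be driven as a
 * pair by the owning network thread.  A minimal sketch of such a loop on a
 * Unix host (array size, wakeup handling and error handling are the caller's
 * business and are merely assumed here):
 *
 *   struct pollfd aPolls[128];
 *   for (;;)
 *   {
 *       int cFds = RT_ELEMENTS(aPolls);
 *       slirp_select_fill(pNATState, &cFds, &aPolls[0]);
 *       if (poll(&aPolls[0], cFds, slirp_get_timeout_ms(pNATState)) >= 0)
 *           slirp_select_poll(pNATState, &aPolls[0], cFds);
 *   }
 *
 * On input *pnfds is the capacity of the pollfd array, on return it is the
 * number of entries actually filled in (on Windows it is simply set to
 * VBOX_EVENT_COUNT and the WSA event objects are used instead).
 */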
1012
1013#if defined(RT_OS_WINDOWS)
1014void slirp_select_poll(PNATState pData, int fTimeout, int fIcmp)
1015#else /* RT_OS_WINDOWS */
1016void slirp_select_poll(PNATState pData, struct pollfd *polls, int ndfs)
1017#endif /* !RT_OS_WINDOWS */
1018{
1019 struct socket *so, *so_next;
1020 int ret;
1021#if defined(RT_OS_WINDOWS)
1022 WSANETWORKEVENTS NetworkEvents;
1023 int rc;
1024 int error;
1025#else
1026 int poll_index = 0;
1027#endif
1028
1029 STAM_PROFILE_START(&pData->StatPoll, a);
1030
1031 /* Update time */
1032 updtime(pData);
1033
1034 /*
1035 * See if anything has timed out
1036 */
1037 if (link_up)
1038 {
1039 if (time_fasttimo && ((curtime - time_fasttimo) >= 2))
1040 {
1041 STAM_PROFILE_START(&pData->StatFastTimer, b);
1042 tcp_fasttimo(pData);
1043 time_fasttimo = 0;
1044 STAM_PROFILE_STOP(&pData->StatFastTimer, b);
1045 }
1046 if (do_slowtimo && ((curtime - last_slowtimo) >= 499))
1047 {
1048 STAM_PROFILE_START(&pData->StatSlowTimer, c);
1049 ip_slowtimo(pData);
1050 tcp_slowtimo(pData);
1051 last_slowtimo = curtime;
1052 STAM_PROFILE_STOP(&pData->StatSlowTimer, c);
1053 }
1054 }
1055#if defined(RT_OS_WINDOWS)
1056 if (fTimeout)
1057 return; /* only timer update */
1058#endif
1059
1060 /*
1061 * Check sockets
1062 */
1063 if (!link_up)
1064 goto done;
1065#if defined(RT_OS_WINDOWS)
1066 /* XXX: before renaming, please see the definition of
1067 * fIcmp in slirp_state.h
1068 */
1069 if (fIcmp)
1070 sorecvfrom(pData, &pData->icmp_socket);
1071#else
1072 if ( (pData->icmp_socket.s != -1)
1073 && CHECK_FD_SET(&pData->icmp_socket, ignored, readfds))
1074 sorecvfrom(pData, &pData->icmp_socket);
1075#endif
1076 /*
1077 * Check TCP sockets
1078 */
1079 QSOCKET_FOREACH(so, so_next, tcp)
1080 /* { */
1081#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1082 if (pData->fmbuf_water_line == 1)
1083 {
1084 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
1085 {
1086 pData->fmbuf_water_line = 0;
1087 pData->fmbuf_water_warn_sent = 0;
1088 }
1089 goto done;
1090 }
1091#endif
1092
1093#ifdef VBOX_WITH_SLIRP_MT
1094 if ( so->so_state & SS_NOFDREF
1095 && so->so_deleted == 1)
1096 {
1097 struct socket *son, *sop = NULL;
1098 QSOCKET_LOCK(tcb);
1099 if (so->so_next != NULL)
1100 {
1101 if (so->so_next != &tcb)
1102 SOCKET_LOCK(so->so_next);
1103 son = so->so_next;
1104 }
1105 if ( so->so_prev != &tcb
1106 && so->so_prev != NULL)
1107 {
1108 SOCKET_LOCK(so->so_prev);
1109 sop = so->so_prev;
1110 }
1111 QSOCKET_UNLOCK(tcb);
1112 remque(pData, so);
1113 NSOCK_DEC();
1114 SOCKET_UNLOCK(so);
1115 SOCKET_LOCK_DESTROY(so);
1116 RTMemFree(so);
1117 so_next = son;
1118 if (sop != NULL)
1119 SOCKET_UNLOCK(sop);
1120 CONTINUE_NO_UNLOCK(tcp);
1121 }
1122#endif
1123 /*
1124 * FD_ISSET is meaningless on these sockets
1125 * (and they can crash the program)
1126 */
1127 if (so->so_state & SS_NOFDREF || so->s == -1)
1128 CONTINUE(tcp);
1129
1130 POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);
1131
1132 LOG_NAT_SOCK(so, TCP, &NetworkEvents, readfds, writefds, xfds);
1133
1134
1135 /*
1136 * Check for URG data
1137 * This will soread as well, so no need to
1138 * test for readfds below if this succeeds
1139 */
1140
1141 /* out-of-band data */
1142 if ( CHECK_FD_SET(so, NetworkEvents, xfds)
1143#ifdef RT_OS_DARWIN
1144 /* Darwin (and probably other BSD hosts) generates a POLLPRI|POLLHUP event on receiving TCP segments
1145 * with the ACK|URG|FIN flags set; on other Unix hosts this combination doesn't reach this branch
1146 */
1147 && !CHECK_FD_SET(so, NetworkEvents, closefds)
1148#endif
1149 )
1150 {
1151 sorecvoob(pData, so);
1152 }
1153
1154 /*
1155 * Check sockets for reading
1156 */
1157 else if ( CHECK_FD_SET(so, NetworkEvents, readfds)
1158 || WIN_CHECK_FD_SET(so, NetworkEvents, acceptds))
1159 {
1160 /*
1161 * Check for incoming connections
1162 */
1163 if (so->so_state & SS_FACCEPTCONN)
1164 {
1165 TCP_CONNECT(pData, so);
1166 if (!CHECK_FD_SET(so, NetworkEvents, closefds))
1167 CONTINUE(tcp);
1168 }
1169
1170 ret = soread(pData, so);
1171 /* Output it if we read something */
1172 if (RT_LIKELY(ret > 0))
1173 TCP_OUTPUT(pData, sototcpcb(so));
1174 }
1175
1176 /*
1177 * Check for FD_CLOSE events.
1178 * In some cases, once FD_CLOSE has been signalled on a socket, the event may be flushed later (for various reasons)
1179 */
1180 if ( CHECK_FD_SET(so, NetworkEvents, closefds)
1181 || (so->so_close == 1))
1182 {
1183 /*
1184 * drain the socket
1185 */
1186 for (;;)
1187 {
1188 ret = soread(pData, so);
1189 if (ret > 0)
1190 TCP_OUTPUT(pData, sototcpcb(so));
1191 else
1192 {
1193 Log2(("%R[natsock] errno %d:%s\n", so, errno, strerror(errno)));
1194 break;
1195 }
1196 }
1197 /* mark the socket for termination _after_ it was drained */
1198 so->so_close = 1;
1199 CONTINUE(tcp);
1200 }
1201
1202 /*
1203 * Check sockets for writing
1204 */
1205 if (CHECK_FD_SET(so, NetworkEvents, writefds))
1206 {
1207 /*
1208 * Check for non-blocking, still-connecting sockets
1209 */
1210 if (so->so_state & SS_ISFCONNECTING)
1211 {
1212 Log2(("connecting %R[natsock] catched\n", so));
1213 /* Connected */
1214 so->so_state &= ~SS_ISFCONNECTING;
1215
1216 /*
1217 * This should be probably guarded by PROBE_CONN too. Anyway,
1218 * we disable it on OS/2 because the below send call returns
1219 * EFAULT which causes the opened TCP socket to close right
1220 * after it has been opened and connected.
1221 */
1222#ifndef RT_OS_OS2
1223 ret = send(so->s, (const char *)&ret, 0, 0);
1224 if (ret < 0)
1225 {
1226 /* XXXXX Must fix, zero bytes is a NOP */
1227 if ( errno == EAGAIN
1228 || errno == EWOULDBLOCK
1229 || errno == EINPROGRESS
1230 || errno == ENOTCONN)
1231 CONTINUE(tcp);
1232
1233 /* else failed */
1234 so->so_state = SS_NOFDREF;
1235 }
1236 /* else so->so_state &= ~SS_ISFCONNECTING; */
1237#endif
1238
1239 /*
1240 * Continue tcp_input
1241 */
1242 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1243 /* continue; */
1244 }
1245 else
1246 SOWRITE(ret, pData, so);
1247 /*
1248 * XXX If we wrote something (a lot), there could be the need
1249 * for a window update. In the worst case, the remote will send
1250 * a window probe to get things going again.
1251 */
1252 }
1253
1254 /*
1255 * Probe a still-connecting, non-blocking socket
1256 * to check if it's still alive
1257 */
1258#ifdef PROBE_CONN
1259 if (so->so_state & SS_ISFCONNECTING)
1260 {
1261 ret = recv(so->s, (char *)&ret, 0, 0);
1262
1263 if (ret < 0)
1264 {
1265 /* XXX */
1266 if ( errno == EAGAIN
1267 || errno == EWOULDBLOCK
1268 || errno == EINPROGRESS
1269 || errno == ENOTCONN)
1270 {
1271 CONTINUE(tcp); /* Still connecting, continue */
1272 }
1273
1274 /* else failed */
1275 so->so_state = SS_NOFDREF;
1276
1277 /* tcp_input will take care of it */
1278 }
1279 else
1280 {
1281 ret = send(so->s, &ret, 0, 0);
1282 if (ret < 0)
1283 {
1284 /* XXX */
1285 if ( errno == EAGAIN
1286 || errno == EWOULDBLOCK
1287 || errno == EINPROGRESS
1288 || errno == ENOTCONN)
1289 {
1290 CONTINUE(tcp);
1291 }
1292 /* else failed */
1293 so->so_state = SS_NOFDREF;
1294 }
1295 else
1296 so->so_state &= ~SS_ISFCONNECTING;
1297
1298 }
1299 TCP_INPUT((struct mbuf *)NULL, sizeof(struct ip),so);
1300 } /* SS_ISFCONNECTING */
1301#endif
1302 LOOP_LABEL(tcp, so, so_next);
1303 }
1304
1305 /*
1306 * Now UDP sockets.
1307 * Incoming packets are sent straight away, they're not buffered.
1308 * Incoming UDP data isn't buffered either.
1309 */
1310 QSOCKET_FOREACH(so, so_next, udp)
1311 /* { */
1312#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1313 if (pData->fmbuf_water_line == 1)
1314 {
1315 if (mbuf_alloced < pData->mbuf_water_line_limit/2)
1316 {
1317 pData->fmbuf_water_line = 0;
1318 pData->fmbuf_water_warn_sent = 0;
1319 }
1320 goto done;
1321 }
1322#endif
1323#ifdef VBOX_WITH_SLIRP_MT
1324 if ( so->so_state & SS_NOFDREF
1325 && so->so_deleted == 1)
1326 {
1327 struct socket *son, *sop = NULL;
1328 QSOCKET_LOCK(udb);
1329 if (so->so_next != NULL)
1330 {
1331 if (so->so_next != &udb)
1332 SOCKET_LOCK(so->so_next);
1333 son = so->so_next;
1334 }
1335 if ( so->so_prev != &udb
1336 && so->so_prev != NULL)
1337 {
1338 SOCKET_LOCK(so->so_prev);
1339 sop = so->so_prev;
1340 }
1341 QSOCKET_UNLOCK(udb);
1342 remque(pData, so);
1343 NSOCK_DEC();
1344 SOCKET_UNLOCK(so);
1345 SOCKET_LOCK_DESTROY(so);
1346 RTMemFree(so);
1347 so_next = son;
1348 if (sop != NULL)
1349 SOCKET_UNLOCK(sop);
1350 CONTINUE_NO_UNLOCK(udp);
1351 }
1352#endif
1353 POLL_UDP_EVENTS(rc, error, so, &NetworkEvents);
1354
1355 LOG_NAT_SOCK(so, UDP, &NetworkEvents, readfds, writefds, xfds);
1356
1357 if (so->s != -1 && CHECK_FD_SET(so, NetworkEvents, readfds))
1358 {
1359 SORECVFROM(pData, so);
1360 }
1361 LOOP_LABEL(udp, so, so_next);
1362 }
1363
1364done:
1365#if 0
1366 /*
1367 * See if we can start outputting
1368 */
1369 if (if_queued && link_up)
1370 if_start(pData);
1371#endif
1372
1373 STAM_PROFILE_STOP(&pData->StatPoll, a);
1374}
1375
1376
1377struct arphdr
1378{
1379 unsigned short ar_hrd; /* format of hardware address */
1380 unsigned short ar_pro; /* format of protocol address */
1381 unsigned char ar_hln; /* length of hardware address */
1382 unsigned char ar_pln; /* length of protocol address */
1383 unsigned short ar_op; /* ARP opcode (command) */
1384
1385 /*
1386 * Ethernet looks like this : This bit is variable sized however...
1387 */
1388 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
1389 unsigned char ar_sip[4]; /* sender IP address */
1390 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
1391 unsigned char ar_tip[4]; /* target IP address */
1392};
1393AssertCompileSize(struct arphdr, 28);
1394
1395static void arp_input(PNATState pData, struct mbuf *m)
1396{
1397 struct ethhdr *eh;
1398 struct ethhdr *reh;
1399 struct arphdr *ah;
1400 struct arphdr *rah;
1401 int ar_op;
1402 struct ex_list *ex_ptr;
1403 uint32_t htip;
1404 uint32_t tip;
1405 struct mbuf *mr;
1406 eh = mtod(m, struct ethhdr *);
1407 ah = (struct arphdr *)&eh[1];
1408 htip = RT_N2H_U32(*(uint32_t*)ah->ar_tip);
1409 tip = *(uint32_t*)ah->ar_tip;
1410
1411 ar_op = RT_N2H_U16(ah->ar_op);
1412
1413 switch (ar_op)
1414 {
1415 case ARPOP_REQUEST:
1416#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1417 mr = m_get(pData);
1418
1419 reh = mtod(mr, struct ethhdr *);
1420 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1421 Log4(("NAT: arp:%R[ether]->%R[ether]\n",
1422 reh->h_source, reh->h_dest));
1423 Log4(("NAT: arp: %R[IP4]\n", &tip));
1424
1425 mr->m_data += if_maxlinkhdr;
1426 mr->m_len = sizeof(struct arphdr);
1427 rah = mtod(mr, struct arphdr *);
1428#else
1429 mr = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1430 reh = mtod(mr, struct ethhdr *);
1431 mr->m_data += ETH_HLEN;
1432 rah = mtod(mr, struct arphdr *);
1433 mr->m_len = sizeof(struct arphdr);
1434 Assert(mr);
1435 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1436#endif
1437#ifdef VBOX_WITH_NAT_SERVICE
1438 if (tip == pData->special_addr.s_addr)
1439 goto arp_ok;
1440#endif
1441 if ((htip & pData->netmask) == RT_N2H_U32(pData->special_addr.s_addr))
1442 {
1443 if ( CTL_CHECK(htip, CTL_DNS)
1444 || CTL_CHECK(htip, CTL_ALIAS)
1445 || CTL_CHECK(htip, CTL_TFTP))
1446 goto arp_ok;
1447 for (ex_ptr = exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next)
1448 {
1449 if ((htip & ~pData->netmask) == ex_ptr->ex_addr)
1450 {
1451 goto arp_ok;
1452 }
1453 }
1454 m_free(pData, m);
1455 m_free(pData, mr);
1456 return;
1457
1458 arp_ok:
1459 rah->ar_hrd = RT_H2N_U16_C(1);
1460 rah->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1461 rah->ar_hln = ETH_ALEN;
1462 rah->ar_pln = 4;
1463 rah->ar_op = RT_H2N_U16_C(ARPOP_REPLY);
1464 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN);
1465
1466 switch (htip & ~pData->netmask)
1467 {
1468 case CTL_DNS:
1469 case CTL_ALIAS:
1470 rah->ar_sha[5] = (uint8_t)(htip & ~pData->netmask);
1471 break;
1472 default:;
1473 }
1474
1475 memcpy(rah->ar_sip, ah->ar_tip, 4);
1476 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
1477 memcpy(rah->ar_tip, ah->ar_sip, 4);
1478 if_encap(pData, ETH_P_ARP, mr, ETH_ENCAP_URG);
1479 m_free(pData, m);
1480 }
1481 /* Gratuitous ARP */
1482 if ( *(uint32_t *)ah->ar_sip == *(uint32_t *)ah->ar_tip
1483 && memcmp(ah->ar_tha, broadcast_ethaddr, ETH_ALEN) == 0
1484 && memcmp(eh->h_dest, broadcast_ethaddr, ETH_ALEN) == 0)
1485 {
1486 /* we've received an announcement about an address assignment,
1487 * so let's update the ARP cache
1488 */
1489 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]) == 0)
1490 {
1491 m_free(pData, mr);
1492 m_free(pData, m);
1493 break;
1494 }
1495 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]);
1496 }
1497 break;
1498
1499 case ARPOP_REPLY:
1500 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]) == 0)
1501 {
1502 m_free(pData, m);
1503 break;
1504 }
1505 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_sip, ah->ar_sha);
1506 m_free(pData, m);
1507 break;
1508
1509 default:
1510 break;
1511 }
1512}
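/*
 * arp_input() above does two things: it answers ARP requests for addresses
 * owned by the NAT engine (the alias/gateway, DNS and TFTP addresses plus any
 * registered exec redirections) using the well known 52:54:00:12:35:xx MAC,
 * and it learns guest MAC/IP pairs from gratuitous ARP and ARP replies so
 * that pending port-forwarding rules can be activated for that guest later
 * (see slirp_arp_cache_update/add and activate_port_forwarding).
 */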
1513
1514/**
1515 * Feed a packet into the slirp engine.
1516 *
1517 * @param m Data buffer, m_len is not valid.
1518 * @param cbBuf The length of the data in m.
1519 */
1520void slirp_input(PNATState pData, struct mbuf *m, size_t cbBuf)
1521{
1522 int proto;
1523 static bool fWarnedIpv6;
1524 struct ethhdr *eh;
1525 uint8_t au8Ether[ETH_ALEN];
1526
1527 m->m_len = cbBuf;
1528 if (cbBuf < ETH_HLEN)
1529 {
1530 LogRel(("NAT: packet having size %d has been ignored\n", m->m_len));
1531 m_free(pData, m);
1532 return;
1533 }
1534 eh = mtod(m, struct ethhdr *);
1535 proto = RT_N2H_U16(eh->h_proto);
1536
1537 memcpy(au8Ether, eh->h_source, ETH_ALEN);
1538
1539 switch(proto)
1540 {
1541 case ETH_P_ARP:
1542 arp_input(pData, m);
1543 break;
1544
1545 case ETH_P_IP:
1546 /* Update time. Important if the network is very quiet, as otherwise
1547 * the first outgoing connection gets an incorrect timestamp. */
1548 updtime(pData);
1549 m_adj(m, ETH_HLEN);
1550#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1551 M_ASSERTPKTHDR(m);
1552 m->m_pkthdr.header = mtod(m, void *);
1553#else /* !VBOX_WITH_SLIRP_BSD_MBUF */
1554 if ( pData->fmbuf_water_line
1555 && pData->fmbuf_water_warn_sent == 0
1556 && (curtime - pData->tsmbuf_water_warn_sent) > 500)
1557 {
1558 icmp_error(pData, m, ICMP_SOURCEQUENCH, 0, 0, "Out of resources!!!");
1559 pData->fmbuf_water_warn_sent = 1;
1560 pData->tsmbuf_water_warn_sent = curtime;
1561 }
1562#endif /* !VBOX_WITH_SLIRP_BSD_MBUF */
1563 ip_input(pData, m);
1564 break;
1565
1566 case ETH_P_IPV6:
1567 m_free(pData, m);
1568 if (!fWarnedIpv6)
1569 {
1570 LogRel(("NAT: IPv6 not supported\n"));
1571 fWarnedIpv6 = true;
1572 }
1573 break;
1574
1575 default:
1576 Log(("NAT: Unsupported protocol %x\n", proto));
1577 m_free(pData, m);
1578 break;
1579 }
1580
1581 if (pData->cRedirectionsActive != pData->cRedirectionsStored)
1582 activate_port_forwarding(pData, au8Ether);
1583}
1584
1585/* output the IP packet to the ethernet device */
1586void if_encap(PNATState pData, uint16_t eth_proto, struct mbuf *m, int flags)
1587{
1588 struct ethhdr *eh;
1589 uint8_t *buf = NULL;
1590 size_t mlen = 0;
1591 STAM_PROFILE_START(&pData->StatIF_encap, a);
1592
1593#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1594 m->m_data -= if_maxlinkhdr;
1595 m->m_len += ETH_HLEN;
1596 eh = mtod(m, struct ethhdr *);
1597
1598 if (MBUF_HEAD(m) != m->m_data)
1599 {
1600 LogRel(("NAT: ethernet detects corruption of the packet"));
1601 AssertMsgFailed(("!!Ethernet frame corrupted!!"));
1602 }
1603#else
1604 M_ASSERTPKTHDR(m);
1605 m->m_data -= ETH_HLEN;
1606 m->m_len += ETH_HLEN;
1607 eh = mtod(m, struct ethhdr *);
1608#endif
1609
1610 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) != 0)
1611 {
1612 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
1613 memcpy(eh->h_source, special_ethaddr, ETH_ALEN);
1614 Assert(memcmp(eh->h_dest, special_ethaddr, ETH_ALEN) != 0);
1615 if (memcmp(eh->h_dest, zerro_ethaddr, ETH_ALEN) == 0)
1616 {
1617 /* don't do anything */
1618 m_free(pData, m);
1619 goto done;
1620 }
1621 }
1622#ifndef VBOX_WITH_SLIRP_BSD_MBUF
1623 mlen = m->m_len;
1624#else
1625 mlen = m_length(m, NULL);
1626 buf = RTMemAlloc(mlen);
1627 if (buf == NULL)
1628 {
1629 LogRel(("NAT: Can't alloc memory for outgoing buffer\n"));
1630 m_free(pData, m);
1631 goto done;
1632 }
1633#endif
1634 eh->h_proto = RT_H2N_U16(eth_proto);
1635#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1636 m_copydata(m, 0, mlen, (char *)buf);
1637 if (flags & ETH_ENCAP_URG)
1638 slirp_urg_output(pData->pvUser, m, buf, mlen);
1639 else
1640 slirp_output(pData->pvUser, m, buf, mlen);
1641#else
1642 if (flags & ETH_ENCAP_URG)
1643 slirp_urg_output(pData->pvUser, m, mtod(m, const uint8_t *), mlen);
1644 else
1645 slirp_output(pData->pvUser, m, mtod(m, const uint8_t *), mlen);
1646#endif
1647done:
1648 STAM_PROFILE_STOP(&pData->StatIF_encap, a);
1649}
1650
1651/**
1652 * We still use the DHCP server leases to map an Ethernet address to an IP address.
1653 * @todo see rt_lookup_in_cache
1654 */
1655static uint32_t find_guest_ip(PNATState pData, const uint8_t *eth_addr)
1656{
1657 uint32_t ip = INADDR_ANY;
1658 int rc;
1659
1660 if (eth_addr == NULL)
1661 return INADDR_ANY;
1662
1663 if ( memcmp(eth_addr, zerro_ethaddr, ETH_ALEN) == 0
1664 || memcmp(eth_addr, broadcast_ethaddr, ETH_ALEN) == 0)
1665 return INADDR_ANY;
1666
1667 rc = slirp_arp_lookup_ip_by_ether(pData, eth_addr, &ip);
1668 if (RT_SUCCESS(rc))
1669 return ip;
1670
1671 bootp_cache_lookup_ip_by_ether(pData, eth_addr, &ip);
1672 /* ignore return code, ip will be set to INADDR_ANY on error */
1673 return ip;
1674}
1675
1676/**
1677 * We need to check whether port forwarding has been activated
1678 * for a specific machine ... that of course relates to
1679 * service mode.
1680 * @todo finish this for the service case
1681 */
1682static void activate_port_forwarding(PNATState pData, const uint8_t *h_source)
1683{
1684 struct port_forward_rule *rule;
1685
1686 /* check mac here */
1687 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
1688 {
1689 struct socket *so;
1690 struct alias_link *alias_link;
1691 struct libalias *lib;
1692 int flags;
1693 struct sockaddr sa;
1694 struct sockaddr_in *psin;
1695 socklen_t socketlen;
1696 struct in_addr alias;
1697 int rc;
1698 uint32_t guest_addr; /* need to understand if we already give address to guest */
1699
1700 if (rule->activated)
1701 continue;
1702
1703#ifdef VBOX_WITH_NAT_SERVICE
1704 if (memcmp(rule->mac_address, h_source, ETH_ALEN) != 0)
1705 continue; /* not the right MAC; @todo: it'd be better to keep the port-forwarding list per MAC */
1706 guest_addr = find_guest_ip(pData, h_source);
1707#else
1708#if 0
1709 if (memcmp(client_ethaddr, h_source, ETH_ALEN) != 0)
1710 continue;
1711#endif
1712 guest_addr = find_guest_ip(pData, h_source);
1713#endif
1714 if (guest_addr == INADDR_ANY)
1715 {
1716 /* the address wasn't granted */
1717 return;
1718 }
1719
1720#if !defined(VBOX_WITH_NAT_SERVICE)
1721 if (rule->guest_addr.s_addr != guest_addr)
1722 continue;
1723#endif
1724
1725 LogRel(("NAT: set redirect %s host port %d => guest port %d @ %R[IP4]\n",
1726 (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1727 rule->host_port, rule->guest_port, &guest_addr));
1728
1729 if (rule->proto == IPPROTO_UDP)
1730 so = udp_listen(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1731 RT_H2N_U16(rule->guest_port), 0);
1732 else
1733 so = solisten(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1734 RT_H2N_U16(rule->guest_port), 0);
1735
1736 if (so == NULL)
1737 goto remove_port_forwarding;
1738
1739 psin = (struct sockaddr_in *)&sa;
1740 psin->sin_family = AF_INET;
1741 psin->sin_port = 0;
1742 psin->sin_addr.s_addr = INADDR_ANY;
1743 socketlen = sizeof(struct sockaddr);
1744
1745 rc = getsockname(so->s, &sa, &socketlen);
1746 if (rc < 0 || sa.sa_family != AF_INET)
1747 goto remove_port_forwarding;
1748
1749 psin = (struct sockaddr_in *)&sa;
1750
1751 lib = LibAliasInit(pData, NULL);
1752 flags = LibAliasSetMode(lib, 0, 0);
1753 flags |= PKT_ALIAS_LOG; /* set logging */
1754 flags |= PKT_ALIAS_REVERSE; /* set reverse mode */
1755 flags = LibAliasSetMode(lib, flags, ~0);
1756
1757 alias.s_addr = RT_H2N_U32(RT_N2H_U32(guest_addr) | CTL_ALIAS);
1758 alias_link = LibAliasRedirectPort(lib, psin->sin_addr, RT_H2N_U16(rule->host_port),
1759 alias, RT_H2N_U16(rule->guest_port),
1760 pData->special_addr, -1, /* not very clear for now */
1761 rule->proto);
1762 if (!alias_link)
1763 goto remove_port_forwarding;
1764
1765 so->so_la = lib;
1766 rule->activated = 1;
1767 pData->cRedirectionsActive++;
1768 continue;
1769
1770 remove_port_forwarding:
1771 LogRel(("NAT: failed to redirect %s %d => %d\n",
1772 (rule->proto == IPPROTO_UDP?"UDP":"TCP"), rule->host_port, rule->guest_port));
1773 LIST_REMOVE(rule, list);
1774 pData->cRedirectionsStored--;
1775 RTMemFree(rule);
1776 }
1777}
1778
1779/**
1780 * Changed in 3.1: instead of opening a new socket right away we now just
1781 * record the following information:
1782 * 1. bind IP
1783 * 2. host port
1784 * 3. guest port
1785 * 4. protocol
1786 * 5. guest MAC address
1787 * The guest's MAC address is rather important for the service case, but we can easily
1788 * get it from the VM configuration in DrvNAT or the service; the idea is to activate
1789 * the corresponding port-forwarding rule later.
1790 */
1791int slirp_redir(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1792 struct in_addr guest_addr, int guest_port, const uint8_t *ethaddr)
1793{
1794 struct port_forward_rule *rule = NULL;
1795 Assert(memcmp(ethaddr, zerro_ethaddr, ETH_ALEN) == 0);
1796
1797 rule = RTMemAllocZ(sizeof(struct port_forward_rule));
1798 if (rule == NULL)
1799 return 1;
1800
1801 rule->proto = (is_udp ? IPPROTO_UDP : IPPROTO_TCP);
1802 rule->host_port = host_port;
1803 rule->guest_port = guest_port;
1804#ifndef VBOX_WITH_NAT_SERVICE
1805 rule->guest_addr.s_addr = guest_addr.s_addr;
1806#endif
1807 rule->bind_ip.s_addr = host_addr.s_addr;
1808 memcpy(rule->mac_address, ethaddr, ETH_ALEN);
1809 /* @todo add mac address */
1810 LIST_INSERT_HEAD(&pData->port_forward_rule_head, rule, list);
1811 pData->cRedirectionsStored++;
1812 return 0;
1813}
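/*
 * slirp_redir() above only records the rule; the listening socket and the
 * libalias redirection are created later by activate_port_forwarding() once
 * the guest's IP address is known (from a DHCP lease or the ARP cache).  A
 * hypothetical front-end call forwarding host TCP port 2222 to guest port 22
 * could look roughly like this (the guest address is just a placeholder and
 * a zero MAC is passed, as the code above expects in the non-service case):
 *
 *   struct in_addr hostAddr, guestAddr;
 *   hostAddr.s_addr  = INADDR_ANY;
 *   guestAddr.s_addr = RT_H2N_U32_C(0x0A00020F);   (i.e. 10.0.2.15)
 *   slirp_redir(pNATState, 0, hostAddr, 2222, guestAddr, 22, zerro_ethaddr);
 *
 * The function returns 0 on success and 1 if the rule could not be allocated.
 */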
1814
1815int slirp_add_exec(PNATState pData, int do_pty, const char *args, int addr_low_byte,
1816 int guest_port)
1817{
1818 return add_exec(&exec_list, do_pty, (char *)args,
1819 addr_low_byte, RT_H2N_U16(guest_port));
1820}
1821
1822void slirp_set_ethaddr_and_activate_port_forwarding(PNATState pData, const uint8_t *ethaddr, uint32_t GuestIP)
1823{
1824#ifndef VBOX_WITH_NAT_SERVICE
1825 memcpy(client_ethaddr, ethaddr, ETH_ALEN);
1826#endif
1827 if (GuestIP != INADDR_ANY)
1828 {
1829 slirp_arp_cache_update_or_add(pData, GuestIP, ethaddr);
1830 activate_port_forwarding(pData, ethaddr);
1831 }
1832}
1833
1834#if defined(RT_OS_WINDOWS)
1835HANDLE *slirp_get_events(PNATState pData)
1836{
1837 return pData->phEvents;
1838}
1839void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index)
1840{
1841 pData->phEvents[index] = hEvent;
1842}
1843#endif
1844
1845unsigned int slirp_get_timeout_ms(PNATState pData)
1846{
1847 if (link_up)
1848 {
1849 if (time_fasttimo)
1850 return 2;
1851 if (do_slowtimo)
1852 return 500; /* see PR_SLOWHZ */
1853 }
1854 return 3600*1000; /* one hour */
1855}
1856
1857#ifndef RT_OS_WINDOWS
1858int slirp_get_nsock(PNATState pData)
1859{
1860 return pData->nsock;
1861}
1862#endif
1863
1864/*
1865 * this function is called from the NAT thread
1866 */
1867void slirp_post_sent(PNATState pData, void *pvArg)
1868{
1869 struct socket *so = 0;
1870 struct tcpcb *tp = 0;
1871 struct mbuf *m = (struct mbuf *)pvArg;
1872 m_free(pData, m);
1873}
1874#ifdef VBOX_WITH_SLIRP_MT
1875void slirp_process_queue(PNATState pData)
1876{
1877 RTReqProcess(pData->pReqQueue, RT_INDEFINITE_WAIT);
1878}
1879void *slirp_get_queue(PNATState pData)
1880{
1881 return pData->pReqQueue;
1882}
1883#endif
1884
1885void slirp_set_dhcp_TFTP_prefix(PNATState pData, const char *tftpPrefix)
1886{
1887 Log2(("tftp_prefix:%s\n", tftpPrefix));
1888 tftp_prefix = tftpPrefix;
1889}
1890
1891void slirp_set_dhcp_TFTP_bootfile(PNATState pData, const char *bootFile)
1892{
1893 Log2(("bootFile:%s\n", bootFile));
1894 bootp_filename = bootFile;
1895}
1896
1897void slirp_set_dhcp_next_server(PNATState pData, const char *next_server)
1898{
1899 Log2(("next_server:%s\n", next_server));
1900 if (next_server == NULL)
1901 pData->tftp_server.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_TFTP);
1902 else
1903 inet_aton(next_server, &pData->tftp_server);
1904}
1905
1906int slirp_set_binding_address(PNATState pData, char *addr)
1907{
1908 if (addr == NULL || (inet_aton(addr, &pData->bindIP) == 0))
1909 {
1910 pData->bindIP.s_addr = INADDR_ANY;
1911 return 1;
1912 }
1913 return 0;
1914}
1915
1916void slirp_set_dhcp_dns_proxy(PNATState pData, bool fDNSProxy)
1917{
1918 if (!pData->use_host_resolver)
1919 {
1920 Log2(("NAT: DNS proxy switched %s\n", (fDNSProxy ? "on" : "off")));
1921 pData->use_dns_proxy = fDNSProxy;
1922 }
1923 else
1924 LogRel(("NAT: Host Resolver conflicts with DNS proxy, the last one was forcely ignored\n"));
1925}
1926
1927#define CHECK_ARG(name, val, lim_min, lim_max) \
1928 do { \
1929 if ((val) < (lim_min) || (val) > (lim_max)) \
1930 { \
1931 LogRel(("NAT: (" #name ":%d) has been ignored, " \
1932 "because out of range (%d, %d)\n", (val), (lim_min), (lim_max))); \
1933 return; \
1934 } \
1935 else \
1936 LogRel(("NAT: (" #name ":%d)\n", (val))); \
1937 } while (0)
1938
1939/* don't allow the user to set values below 8 KiB or above 1 MiB */
1940#define _8K_1M_CHECK_ARG(name, val) CHECK_ARG(name, (val), 8, 1024)
1941void slirp_set_rcvbuf(PNATState pData, int kilobytes)
1942{
1943 _8K_1M_CHECK_ARG("SOCKET_RCVBUF", kilobytes);
1944 pData->socket_rcv = kilobytes;
1945}
1946void slirp_set_sndbuf(PNATState pData, int kilobytes)
1947{
1948 _8K_1M_CHECK_ARG("SOCKET_SNDBUF", kilobytes);
1949 pData->socket_snd = kilobytes * _1K;
1950}
1951void slirp_set_tcp_rcvspace(PNATState pData, int kilobytes)
1952{
1953 _8K_1M_CHECK_ARG("TCP_RCVSPACE", kilobytes);
1954 tcp_rcvspace = kilobytes * _1K;
1955}
1956void slirp_set_tcp_sndspace(PNATState pData, int kilobytes)
1957{
1958 _8K_1M_CHECK_ARG("TCP_SNDSPACE", kilobytes);
1959 tcp_sndspace = kilobytes * _1K;
1960}
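/*
 * All four setters above take kilobytes and, via CHECK_ARG, log and ignore
 * values outside the 8..1024 range.  For example, slirp_set_tcp_sndspace(pData, 64)
 * configures a 64 KiB TCP send window, matching the 64 * _1K defaults chosen
 * in slirp_init().
 */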
1961
1962/*
1963 * Look up the Ethernet address by IP in the ARP cache.
1964 * Note: it is the caller's responsibility to allocate the buffer for the result.
1965 * @returns IPRT status code
1966 */
1967int slirp_arp_lookup_ether_by_ip(PNATState pData, uint32_t ip, uint8_t *ether)
1968{
1969 struct arp_cache_entry *ac;
1970
1971 if (ether == NULL)
1972 return VERR_INVALID_PARAMETER;
1973
1974 if (LIST_EMPTY(&pData->arp_cache))
1975 return VERR_NOT_FOUND;
1976
1977 LIST_FOREACH(ac, &pData->arp_cache, list)
1978 {
1979 if (ac->ip == ip)
1980 {
1981 memcpy(ether, ac->ether, ETH_ALEN);
1982 return VINF_SUCCESS;
1983 }
1984 }
1985 return VERR_NOT_FOUND;
1986}
1987
1988/*
1989 * Look up the IP address by Ethernet address in the ARP cache.
1990 * Note: it is the caller's responsibility to allocate the buffer for the result.
1991 * @returns VINF_SUCCESS if found, VERR_NOT_FOUND otherwise
1992 */
1993int slirp_arp_lookup_ip_by_ether(PNATState pData, const uint8_t *ether, uint32_t *ip)
1994{
1995 struct arp_cache_entry *ac;
1996 *ip = INADDR_ANY;
1997
1998 if (LIST_EMPTY(&pData->arp_cache))
1999 return VERR_NOT_FOUND;
2000
2001 LIST_FOREACH(ac, &pData->arp_cache, list)
2002 {
2003 if (memcmp(ether, ac->ether, ETH_ALEN) == 0)
2004 {
2005 *ip = ac->ip;
2006 return VINF_SUCCESS;
2007 }
2008 }
2009 return VERR_NOT_FOUND;
2010}
2011
2012void slirp_arp_who_has(PNATState pData, uint32_t dst)
2013{
2014 struct mbuf *m;
2015 struct ethhdr *ehdr;
2016 struct arphdr *ahdr;
2017
2018#ifndef VBOX_WITH_SLIRP_BSD_MBUF
2019 m = m_get(pData);
2020#else
2021 m = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
2022#endif
2023 if (m == NULL)
2024 {
2025 LogRel(("NAT: Can't alloc mbuf for ARP request\n"));
2026 return;
2027 }
2028 ehdr = mtod(m, struct ethhdr *);
2029 memset(ehdr->h_source, 0xff, ETH_ALEN);
2030 ahdr = (struct arphdr *)&ehdr[1];
2031 ahdr->ar_hrd = RT_H2N_U16_C(1);
2032 ahdr->ar_pro = RT_H2N_U16_C(ETH_P_IP);
2033 ahdr->ar_hln = ETH_ALEN;
2034 ahdr->ar_pln = 4;
2035 ahdr->ar_op = RT_H2N_U16_C(ARPOP_REQUEST);
2036 memcpy(ahdr->ar_sha, special_ethaddr, ETH_ALEN);
2037 *(uint32_t *)ahdr->ar_sip = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
2038 memset(ahdr->ar_tha, 0xff, ETH_ALEN); /*broadcast*/
2039 *(uint32_t *)ahdr->ar_tip = dst;
2040#ifndef VBOX_WITH_SLIRP_BSD_MBUF
2041 m->m_data += if_maxlinkhdr;
2042 m->m_len = sizeof(struct arphdr);
2043#else
2044 /* warning: this must fit within the minimal mbuf size */
2045 m->m_len = sizeof(struct arphdr) + ETH_HLEN;
2046 m->m_data += ETH_HLEN;
2047 m->m_len -= ETH_HLEN;
2048#endif
2049 if_encap(pData, ETH_P_ARP, m, ETH_ENCAP_URG);
2050}
2051
2052int slirp_arp_cache_update_or_add(PNATState pData, uint32_t dst, const uint8_t *mac)
2053{
2054 if (slirp_arp_cache_update(pData, dst, mac))
2055 slirp_arp_cache_add(pData, dst, mac);
2056
2057 return 0;
2058}
2059
2060/* updates the arp cache
2061 * @returns 0 if the entry was found and updated,
2062 * 1 if it wasn't found.
2063 */
2064int slirp_arp_cache_update(PNATState pData, uint32_t dst, const uint8_t *mac)
2065{
2066 struct arp_cache_entry *ac;
2067 LIST_FOREACH(ac, &pData->arp_cache, list)
2068 {
2069 if (memcmp(ac->ether, mac, ETH_ALEN) == 0)
2070 {
2071 ac->ip = dst;
2072 return 0;
2073 }
2074 }
2075 return 1;
2076}
2077
2078void slirp_arp_cache_add(PNATState pData, uint32_t ip, const uint8_t *ether)
2079{
2080 struct arp_cache_entry *ac = NULL;
2081 ac = RTMemAllocZ(sizeof(struct arp_cache_entry));
2082 if (ac == NULL)
2083 {
2084 LogRel(("NAT: Can't allocate arp cache entry\n"));
2085 return;
2086 }
2087 ac->ip = ip;
2088 memcpy(ac->ether, ether, ETH_ALEN);
2089 LIST_INSERT_HEAD(&pData->arp_cache, ac, list);
2090}
2091
2092#ifdef VBOX_WITH_SLIRP_BSD_MBUF
2093void slirp_set_mtu(PNATState pData, int mtu)
2094{
2095 if (mtu < 20 || mtu >= 16000)
2096 {
2097 LogRel(("NAT: mtu(%d) is out of range (20;16000] mtu forcely assigned to 1500\n", mtu));
2098 mtu = 1500;
2099 }
2100 if_mtu =
2101 if_mru = mtu;
2102}
2103#endif