VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/slirp.c@ 22844

Last change on this file since 22844 was 22843, checked in by vboxsync, 15 years ago

NAT: using host resolver instead of dnsproxy.

  • Property svn:eol-style set to native
File size: 60.6 KB
1#include "slirp.h"
2#ifdef RT_OS_OS2
3# include <paths.h>
4#endif
5
6#include <VBox/err.h>
7#include <VBox/pdmdrv.h>
8#include <iprt/assert.h>
9#ifndef RT_OS_WINDOWS
10# include <sys/ioctl.h>
11# include <poll.h>
12#else
13# include <Winnls.h>
14# define _WINSOCK2API_
15# include <IPHlpApi.h>
16#endif
17#include <alias.h>
18
19#if !defined(RT_OS_WINDOWS)
20
21# define DO_ENGAGE_EVENT1(so, fdset, label) \
22 do { \
23 if( so->so_poll_index != -1 \
24 && so->s == polls[so->so_poll_index].fd) { \
25 polls[so->so_poll_index].events |= N_(fdset ## _poll); \
26 break; /* out of this loop */ \
27 } \
28 AssertRelease(poll_index < (nfds)); \
29 AssertRelease(poll_index >= 0 && poll_index < (nfds)); \
30 polls[poll_index].fd = (so)->s; \
31 (so)->so_poll_index = poll_index; \
32 polls[poll_index].events = N_(fdset ## _poll); \
33 polls[poll_index].revents = 0; \
34 poll_index++; \
35 } while(0)
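/*
 * Descriptive note: DO_ENGAGE_EVENT1 registers a socket with the pollfd array
 * built during slirp_select_fill.  If the socket already owns a slot in this
 * pass (so_poll_index matches and the fd is still the same), the requested
 * event bits are OR'ed into that slot; otherwise the next free slot is
 * claimed, its index is remembered in so->so_poll_index and poll_index is
 * advanced.  DO_ENGAGE_EVENT2 below does the same for two event sets at once.
 */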
36
37
38# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
39 do { \
40 if( so->so_poll_index != -1 \
41 && so->s == polls[so->so_poll_index].fd) { \
42 polls[so->so_poll_index].events |= \
43 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
44 break; /* out of this loop */ \
45 } \
46 AssertRelease(poll_index < (nfds)); \
47 polls[poll_index].fd = (so)->s; \
48 (so)->so_poll_index = poll_index; \
49 polls[poll_index].events = \
50 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
51 poll_index++; \
52 } while(0)
53
54# define DO_POLL_EVENTS(rc, error, so, events, label) do {} while (0)
55
56# define DO_CHECK_FD_SET(so, events, fdset) ( ((so)->so_poll_index != -1) \
57 && ((so)->so_poll_index <= ndfs) \
58 && ((so)->s == polls[so->so_poll_index].fd) \
59 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)))
60# define DO_UNIX_CHECK_FD_SET(so, events, fdset ) DO_CHECK_FD_SET((so), (events), fdset) /*specific for Unix API */
61# define DO_WIN_CHECK_FD_SET(so, events, fdset ) 0 /* specific for Windows Winsock API */
62
63# ifndef RT_OS_WINDOWS
64
65# ifndef RT_OS_LINUX
66# define readfds_poll (POLLRDNORM)
67# define writefds_poll (POLLWRNORM)
68# define xfds_poll (POLLRDBAND|POLLWRBAND|POLLPRI)
69# else
70# define readfds_poll (POLLIN)
71# define writefds_poll (POLLOUT)
72# define xfds_poll (POLLPRI)
73# endif
74# define rderr_poll (POLLERR)
75# define rdhup_poll (POLLHUP)
76# define nval_poll (POLLNVAL)
77
78# define ICMP_ENGAGE_EVENT(so, fdset) \
79 do { \
80 if (pData->icmp_socket.s != -1) \
81 DO_ENGAGE_EVENT1((so), fdset, ICMP); \
82 } while (0)
83# else /* !RT_OS_WINDOWS */
84# define DO_WIN_CHECK_FD_SET(so, events, fdset ) DO_CHECK_FD_SET((so), (events), fdset)
85# define ICMP_ENGAGE_EVENT(so, fdset) do {} while(0)
86#endif /* RT_OS_WINDOWS */
87
88#else /* defined(RT_OS_WINDOWS) */
89
90/*
91 * On Windows, we will be notified by IcmpSendEcho2() when the response arrives.
92 * So no call to WSAEventSelect necessary.
93 */
94# define ICMP_ENGAGE_EVENT(so, fdset) do {} while(0)
95
96# define DO_ENGAGE_EVENT1(so, fdset1, label) \
97 do { \
98 rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS); \
99 if (rc == SOCKET_ERROR) \
100 { \
101 /* This should not happen */ \
102 error = WSAGetLastError(); \
103 LogRel(("WSAEventSelect (" #label ") error %d (so=%x, socket=%s, event=%x)\n", \
104 error, (so), (so)->s, VBOX_SOCKET_EVENT)); \
105 } \
106 } while(0); \
107 CONTINUE(label)
108
109# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
110 DO_ENGAGE_EVENT1((so), (fdset1), label)
111
112# define DO_POLL_EVENTS(rc, error, so, events, label) \
113 (rc) = WSAEnumNetworkEvents((so)->s, VBOX_SOCKET_EVENT, (events)); \
114 if ((rc) == SOCKET_ERROR) \
115 { \
116 (error) = WSAGetLastError(); \
117 LogRel(("WSAEnumNetworkEvents " #label " error %d\n", (error))); \
118 CONTINUE(label); \
119 }
120
121# define acceptds_win FD_ACCEPT
122# define acceptds_win_bit FD_ACCEPT_BIT
123
124# define readfds_win FD_READ
125# define readfds_win_bit FD_READ_BIT
126
127# define writefds_win FD_WRITE
128# define writefds_win_bit FD_WRITE_BIT
129
130# define xfds_win FD_OOB
131# define xfds_win_bit FD_OOB_BIT
132
133# define DO_CHECK_FD_SET(so, events, fdset) \
134 (((events).lNetworkEvents & fdset ## _win) && ((events).iErrorCode[fdset ## _win_bit] == 0))
135
136# define DO_WIN_CHECK_FD_SET(so, events, fdset ) DO_CHECK_FD_SET((so), (events), fdset)
137# define DO_UNIX_CHECK_FD_SET(so, events, fdset ) 1 /*specific for Unix API */
138
139#endif /* defined(RT_OS_WINDOWS) */
140
141#define TCP_ENGAGE_EVENT1(so, fdset) \
142 DO_ENGAGE_EVENT1((so), fdset, tcp)
143
144#define TCP_ENGAGE_EVENT2(so, fdset1, fdset2) \
145 DO_ENGAGE_EVENT2((so), fdset1, fdset2, tcp)
146
147#define UDP_ENGAGE_EVENT(so, fdset) \
148 DO_ENGAGE_EVENT1((so), fdset, udp)
149
150#define POLL_TCP_EVENTS(rc, error, so, events) \
151 DO_POLL_EVENTS((rc), (error), (so), (events), tcp)
152
153#define POLL_UDP_EVENTS(rc, error, so, events) \
154 DO_POLL_EVENTS((rc), (error), (so), (events), udp)
155
156#define CHECK_FD_SET(so, events, set) \
157 (DO_CHECK_FD_SET((so), (events), set))
158
159#define WIN_CHECK_FD_SET(so, events, set) \
160 (DO_WIN_CHECK_FD_SET((so), (events), set))
161#define UNIX_CHECK_FD_SET(so, events, set) \
162 (DO_UNIX_CHECK_FD_SET(so, events, set))
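/*
 * Descriptive note: the wrappers above hide the platform split.  On Unix hosts
 * the sockets are armed as entries of a pollfd array (poll(2) flavour); on
 * Windows they are attached to VBOX_SOCKET_EVENT via WSAEventSelect() and
 * later queried with WSAEnumNetworkEvents().  Typical use further down in this
 * file looks like:
 *
 *     TCP_ENGAGE_EVENT1(so, readfds);                  // in slirp_select_fill
 *     POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);  // in slirp_select_poll
 *     if (CHECK_FD_SET(so, NetworkEvents, readfds))
 *         ret = soread(pData, so);
 */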
163
164/*
165 * Logging macros
166 */
167#if VBOX_WITH_DEBUG_NAT_SOCKETS
168# if defined(RT_OS_WINDOWS)
169# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
170 do { \
171 LogRel((" " #proto " %R[natsock] %R[natwinnetevents]\n", (so), (winevent))); \
172 } while (0)
173# else /* RT_OS_WINDOWS */
174# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
175 do { \
176 LogRel((" " #proto " %R[natsock] %s %s %s er: %s, %s, %s\n", (so), \
177 CHECK_FD_SET(so, ign ,r_fdset) ? "READ":"", \
178 CHECK_FD_SET(so, ign, w_fdset) ? "WRITE":"", \
179 CHECK_FD_SET(so, ign, x_fdset) ? "OOB":"", \
180 CHECK_FD_SET(so, ign, rderr) ? "RDERR":"", \
181 CHECK_FD_SET(so, ign, rdhup) ? "RDHUP":"", \
182 CHECK_FD_SET(so, ign, nval) ? "RDNVAL":"")); \
183 } while (0)
184# endif /* !RT_OS_WINDOWS */
185#else /* VBOX_WITH_DEBUG_NAT_SOCKETS */
186# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) do {} while (0)
187#endif /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
188
189#define LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) DO_LOG_NAT_SOCK((so), proto, (winevent), r_fdset, w_fdset, x_fdset)
190
191static void activate_port_forwarding(PNATState, struct ethhdr *);
192static uint32_t find_guest_ip(PNATState, const uint8_t *);
193
194static const uint8_t special_ethaddr[6] =
195{
196 0x52, 0x54, 0x00, 0x12, 0x35, 0x00
197};
198
199static const uint8_t broadcast_ethaddr[6] =
200{
201 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
202};
203
204const uint8_t zerro_ethaddr[6] =
205{
206 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
207};
208
209
210#ifdef RT_OS_WINDOWS
211static int get_dns_addr_domain(PNATState pData, bool fVerbose,
212 struct in_addr *pdns_addr,
213 const char **ppszDomain)
214{
215 /* Get amount of memory required for operation */
216 ULONG flags = GAA_FLAG_INCLUDE_PREFIX; /*GAA_FLAG_INCLUDE_ALL_INTERFACES;*/ /* all interfaces registered in NDIS */
217 PIP_ADAPTER_ADDRESSES addresses = NULL;
218 PIP_ADAPTER_ADDRESSES addr = NULL;
219 PIP_ADAPTER_DNS_SERVER_ADDRESS dns = NULL;
220 ULONG size = 0;
221 int wlen = 0;
222 char *suffix;
223 struct dns_entry *da = NULL;
224 struct dns_domain_entry *dd = NULL;
225 ULONG ret = ERROR_SUCCESS;
226
227 /* @todo add the SKIP_* flags so that only the required information is fetched */
228
229 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, addresses, &size);
230 if (ret != ERROR_BUFFER_OVERFLOW)
231 {
232 LogRel(("NAT: error %lu occurred on capacity detection operation\n", ret));
233 return -1;
234 }
235
236 if (size == 0)
237 {
238 LogRel(("NAT: Win socket API returns non capacity\n"));
239 return -1;
240 }
241
242 addresses = RTMemAllocZ(size);
243 if (addresses == NULL)
244 {
245 LogRel(("NAT: No memory available \n"));
246 return -1;
247 }
248
249 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, addresses, &size);
250 if (ret != ERROR_SUCCESS)
251 {
252 LogRel(("NAT: error %lu occurred on fetching adapters info\n", ret));
253 RTMemFree(addresses);
254 return -1;
255 }
256 addr = addresses;
257 while(addr != NULL)
258 {
259 int found;
260 if (addr->OperStatus != IfOperStatusUp)
261 goto next;
262 dns = addr->FirstDnsServerAddress;
263 while (dns != NULL)
264 {
265 struct sockaddr *saddr = dns->Address.lpSockaddr;
266 if (saddr->sa_family != AF_INET)
267 goto next_dns;
268 /* add dns server to list */
269 da = RTMemAllocZ(sizeof(struct dns_entry));
270 if (da == NULL)
271 {
272 LogRel(("NAT: Can't allocate buffer for DNS entry\n"));
273 RTMemFree(addresses);
274 return VERR_NO_MEMORY;
275 }
276 LogRel(("NAT: adding %R[IP4] to DNS server list\n", &((struct sockaddr_in *)saddr)->sin_addr));
277 if ((((struct sockaddr_in *)saddr)->sin_addr.s_addr & htonl(IN_CLASSA_NET)) == ntohl(INADDR_LOOPBACK & IN_CLASSA_NET)) {
278 da->de_addr.s_addr = htonl(ntohl(special_addr.s_addr) | CTL_ALIAS);
279 }
280 else
281 {
282 da->de_addr.s_addr = ((struct sockaddr_in *)saddr)->sin_addr.s_addr;
283 }
284 TAILQ_INSERT_HEAD(&pData->dns_list_head, da, de_list);
285
286 if (addr->DnsSuffix == NULL)
287 goto next_dns;
288
289 /*uniq*/
290 RTUtf16ToUtf8(addr->DnsSuffix, &suffix);
291
292 if (!suffix || strlen(suffix) == 0) {
293 RTStrFree(suffix);
294 goto next_dns;
295 }
296
297 found = 0;
298 LIST_FOREACH(dd, &pData->dns_domain_list_head, dd_list)
299 {
300 if ( dd->dd_pszDomain != NULL
301 && strcmp(dd->dd_pszDomain, suffix) == 0)
302 {
303 found = 1;
304 RTStrFree(suffix);
305 break;
306 }
307 }
308 if (found == 0)
309 {
310 dd = RTMemAllocZ(sizeof(struct dns_domain_entry));
311 if (dd == NULL)
312 {
313 LogRel(("NAT: not enough memory\n"));
314 RTStrFree(suffix);
315 RTMemFree(addresses);
316 return VERR_NO_MEMORY;
317 }
318 dd->dd_pszDomain = suffix;
319 LogRel(("NAT: adding domain name %s to search list\n", dd->dd_pszDomain));
320 LIST_INSERT_HEAD(&pData->dns_domain_list_head, dd, dd_list);
321 }
322 next_dns:
323 dns = dns->Next;
324 }
325 next:
326 addr = addr->Next;
327 }
328 RTMemFree(addresses);
329 return 0;
330}
331
332#else /* !RT_OS_WINDOWS */
333
334static int get_dns_addr_domain(PNATState pData, bool fVerbose,
335 struct in_addr *pdns_addr,
336 const char **ppszDomain)
337{
338 char buff[512];
339 char buff2[256];
340 FILE *f = NULL;
341 int found = 0;
342 struct in_addr tmp_addr;
343 int nameservers = 0;
344
345#ifdef RT_OS_OS2
346 /* Try various locations. */
347 char *etc = getenv("ETC");
348 if (etc)
349 {
350 snprintf(buff, sizeof(buff), "%s/RESOLV2", etc);
351 f = fopen(buff, "rt");
352 }
353 if (!f)
354 {
355 snprintf(buff, sizeof(buff), "%s/RESOLV2", _PATH_ETC);
356 f = fopen(buff, "rt");
357 }
358 if (!f)
359 {
360 snprintf(buff, sizeof(buff), "%s/resolv.conf", _PATH_ETC);
361 f = fopen(buff, "rt");
362 }
363#else
364#ifndef DEBUG_vvl
365 f = fopen("/etc/resolv.conf", "r");
366#else
367 char *home = getenv("HOME");
368 snprintf(buff, sizeof(buff), "%s/resolv.conf", home);
369 f = fopen(buff, "r");
370 if (f != NULL)
371 {
372 Log(("NAT: DNS we're using %s\n", buff));
373 }
374 else
375 {
376 f = fopen("/etc/resolv.conf", "r");
377 Log(("NAT: DNS we're using %s\n", buff));
378 }
379#endif
380#endif
381 if (!f)
382 return -1;
383
384 if (ppszDomain)
385 *ppszDomain = NULL;
386 Log(("nat: DNS Servers:\n"));
387 while (fgets(buff, 512, f) != NULL)
388 {
389 struct dns_entry *da = NULL;
390 if (sscanf(buff, "nameserver%*[ \t]%256s", buff2) == 1)
391 {
392 if (!inet_aton(buff2, &tmp_addr))
393 continue;
394 /*localhost mask */
395 da = RTMemAllocZ(sizeof (struct dns_entry));
396 if (da == NULL)
397 {
398 LogRel(("can't alloc memory for DNS entry\n"));
399 return -1;
400 }
401 /*check */
402 da->de_addr.s_addr = tmp_addr.s_addr;
403 if ((da->de_addr.s_addr & htonl(IN_CLASSA_NET)) == ntohl(INADDR_LOOPBACK & IN_CLASSA_NET)) {
404 da->de_addr.s_addr = htonl(ntohl(special_addr.s_addr) | CTL_ALIAS);
405 }
406 TAILQ_INSERT_HEAD(&pData->dns_list_head, da, de_list);
407 found++;
408 }
409 if ((!strncmp(buff, "domain", 6) || !strncmp(buff, "search", 6)))
410 {
411 char *tok;
412 char *saveptr;
413 struct dns_domain_entry *dd = NULL;
414 int found = 0;
415 tok = strtok_r(&buff[6], " \t\n", &saveptr);
416 LIST_FOREACH(dd, &pData->dns_domain_list_head, dd_list)
417 {
418 if( tok != NULL
419 && strcmp(tok, dd->dd_pszDomain) == 0)
420 {
421 found = 1;
422 break;
423 }
424 }
425 if (tok != NULL && found == 0) {
426 dd = RTMemAllocZ(sizeof(struct dns_domain_entry));
427 if (dd == NULL)
428 {
429 LogRel(("NAT: not enought memory to add domain list\n"));
430 return VERR_NO_MEMORY;
431 }
432 dd->dd_pszDomain = RTStrDup(tok);
433 LogRel(("NAT: adding domain name %s to search list\n", dd->dd_pszDomain));
434 LIST_INSERT_HEAD(&pData->dns_domain_list_head, dd, dd_list);
435 }
436 }
437 }
438 fclose(f);
439 if (!found)
440 return -1;
441 return 0;
442}
443
444#endif
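/*
 * For reference, the parser above understands the usual resolv.conf syntax,
 * e.g. (illustrative values only):
 *
 *     nameserver 192.168.1.1
 *     nameserver 127.0.0.1     (remapped to the CTL_ALIAS address of the NAT network)
 *     search example.com
 *
 * Loopback nameservers are rewritten so that the guest talks to the NAT engine
 * instead of the host's loopback interface.
 */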
445
446static void alias_init(PNATState pData, struct libalias **pla, int la_flags, struct in_addr addr)
447{
448 int flags = 0;
449 struct libalias *la;
450 la = LibAliasInit(pData, NULL);
451 if (la == NULL)
452 {
453 LogRel(("NAT: LibAlias default rule wasn't initialized\n"));
454 AssertMsgFailed(("NAT: LibAlias default rule wasn't initialized\n"));
455 }
456 flags = LibAliasSetMode(la, 0, 0);
457#ifndef NO_FW_PUNCH
458 flags |= PKT_ALIAS_PUNCH_FW;
459#endif
460#ifdef DEBUG
461 flags |= PKT_ALIAS_LOG; /* set logging */
462#endif
463 flags |= la_flags;
464 flags = LibAliasSetMode(la, flags, ~0);
465 LibAliasSetAddress(la, addr);
466 *pla = la;
467}
468
469static int slirp_init_dns_list(PNATState pData)
470{
471 TAILQ_INIT(&pData->dns_list_head);
472 LIST_INIT(&pData->dns_domain_list_head);
473 return get_dns_addr_domain(pData, true, NULL, NULL);
474}
475
476static void slirp_release_dns_list(PNATState pData)
477{
478 struct dns_entry *de = NULL;
479 struct dns_domain_entry *dd = NULL;
480 while(!TAILQ_EMPTY(&pData->dns_list_head)) {
481 de = TAILQ_FIRST(&pData->dns_list_head);
482 TAILQ_REMOVE(&pData->dns_list_head, de, de_list);
483 RTMemFree(de);
484 }
485 while(!LIST_EMPTY(&pData->dns_domain_list_head)) {
486 dd = LIST_FIRST(&pData->dns_domain_list_head);
487 LIST_REMOVE(dd, dd_list);
488 if (dd->dd_pszDomain != NULL)
489 RTStrFree(dd->dd_pszDomain);
490 RTMemFree(dd);
491 }
492}
493
494int get_dns_addr(PNATState pData, struct in_addr *pdns_addr)
495{
496 return get_dns_addr_domain(pData, false, pdns_addr, NULL);
497}
498
499#ifndef VBOX_WITH_NAT_SERVICE
500int slirp_init(PNATState *ppData, const char *pszNetAddr, uint32_t u32Netmask,
501 bool fPassDomain, void *pvUser)
502#else
503int slirp_init(PNATState *ppData, uint32_t u32NetAddr, uint32_t u32Netmask,
504 bool fPassDomain, void *pvUser)
505#endif
506{
507 int fNATfailed = 0;
508 int rc;
509 PNATState pData = RTMemAllocZ(sizeof(NATState));
510 *ppData = pData;
511 if (!pData)
512 return VERR_NO_MEMORY;
513 if (u32Netmask & 0x1f)
514 /* CTL is x.x.x.15, bootp passes up to 16 IPs (15..31) */
515 return VERR_INVALID_PARAMETER;
516 pData->fPassDomain = fPassDomain;
517 pData->pvUser = pvUser;
518 pData->netmask = u32Netmask;
519
520 /* sockets & TCP defaults */
521 pData->socket_rcv = 64 * _1K;
522 pData->socket_snd = 64 * _1K;
523 tcp_sndspace = 64 * _1K;
524 tcp_rcvspace = 64 * _1K;
525
526#ifdef RT_OS_WINDOWS
527 {
528 WSADATA Data;
529 WSAStartup(MAKEWORD(2, 0), &Data);
530 }
531 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
532#endif
533#ifdef VBOX_WITH_SLIRP_MT
534 QSOCKET_LOCK_CREATE(tcb);
535 QSOCKET_LOCK_CREATE(udb);
536 rc = RTReqCreateQueue(&pData->pReqQueue);
537 AssertReleaseRC(rc);
538#endif
539
540 link_up = 1;
541
542 rc = bootp_dhcp_init(pData);
543 if (rc != 0)
544 {
545 LogRel(("NAT: DHCP server initialization was failed\n"));
546 return VINF_NAT_DNS;
547 }
548 debug_init();
549 if_init(pData);
550 ip_init(pData);
551 icmp_init(pData);
552
553 /* Initialise mbufs *after* setting the MTU */
554 m_init(pData);
555
556#ifndef VBOX_WITH_NAT_SERVICE
557 inet_aton(pszNetAddr, &special_addr);
558#else
559 special_addr.s_addr = u32NetAddr;
560#endif
561 pData->slirp_ethaddr = &special_ethaddr[0];
562 alias_addr.s_addr = special_addr.s_addr | htonl(CTL_ALIAS);
563 /* @todo add the ability to configure this stuff */
564
565 /* set default addresses */
566 loopback_addr.s_addr = INADDR_LOOPBACK;
567 if (slirp_init_dns_list(pData) < 0)
568 fNATfailed = 1;
569
570 dnsproxy_init(pData);
571
572 getouraddr(pData);
573 {
574 struct in_addr proxy_addr;
575 proxy_addr.s_addr = htonl(ntohl(special_addr.s_addr) | CTL_ALIAS);
576 alias_init(pData, &pData->proxy_alias, 0, proxy_addr);
577
578#if 0
579 proxy_addr.s_addr = htonl(ntohl(special_addr.s_addr) | CTL_DNS);
580 alias_init(pData, &pData->dns_alias, PKT_ALIAS_REVERSE, proxy_addr);
581#endif
582
583 ftp_alias_load(pData);
584 nbt_alias_load(pData);
585 dns_alias_load(pData);
586 }
587 return fNATfailed ? VINF_NAT_DNS : VINF_SUCCESS;
588}
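/*
 * Rough lifecycle as seen from the consumer side (a sketch, not the
 * authoritative DrvNAT code): slirp_init() is called once per NAT instance,
 * slirp_register_statistics() optionally hooks up the STAM counters, then the
 * NAT thread repeatedly runs slirp_select_fill() / waits / slirp_select_poll(),
 * feeds guest frames in via slirp_input(), and finally tears everything down
 * with slirp_deregister_statistics() and slirp_term().
 */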
589
590/**
591 * Register statistics.
592 */
593void slirp_register_statistics(PNATState pData, PPDMDRVINS pDrvIns)
594{
595#ifdef VBOX_WITH_STATISTICS
596# define PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
597# define COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
598# include "counters.h"
599# undef COUNTER
600/** @todo register statistics for the variables dumped by:
601 * ipstats(pData); tcpstats(pData); udpstats(pData); icmpstats(pData);
602 * mbufstats(pData); sockstats(pData); */
603#endif /* VBOX_WITH_STATISTICS */
604}
605
606/**
607 * Deregister statistics.
608 */
609void slirp_deregister_statistics(PNATState pData, PPDMDRVINS pDrvIns)
610{
611#ifdef VBOX_WITH_STATISTICS
612# define PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
613# define COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
614# include "counters.h"
615#endif /* VBOX_WITH_STATISTICS */
616}
617
618/**
619 * Marks the link as up, making it possible to establish new connections.
620 */
621void slirp_link_up(PNATState pData)
622{
623 link_up = 1;
624}
625
626/**
627 * Marks the link as down and cleans up the current connections.
628 */
629void slirp_link_down(PNATState pData)
630{
631 struct socket *so;
632
633 while ((so = tcb.so_next) != &tcb)
634 {
635 if (so->so_state & SS_NOFDREF || so->s == -1)
636 sofree(pData, so);
637 else
638 tcp_drop(pData, sototcpcb(so), 0);
639 }
640
641 while ((so = udb.so_next) != &udb)
642 udp_detach(pData, so);
643
644 link_up = 0;
645}
646
647/**
648 * Terminates the slirp component.
649 */
650void slirp_term(PNATState pData)
651{
652#ifdef RT_OS_WINDOWS
653 pData->pfIcmpCloseHandle(pData->icmp_socket.sh);
654 FreeLibrary(pData->hmIcmpLibrary);
655 RTMemFree(pData->pvIcmpBuffer);
656#else
657 closesocket(pData->icmp_socket.s);
658#endif
659
660 slirp_link_down(pData);
661 slirp_release_dns_list(pData);
662 ftp_alias_unload(pData);
663 nbt_alias_unload(pData);
664 dns_alias_unload(pData);
665 while(!LIST_EMPTY(&instancehead))
666 {
667 struct libalias *la = LIST_FIRST(&instancehead);
668 /* libalias do all clean up */
669 LibAliasUninit(la);
670 }
671 while(!LIST_EMPTY(&pData->arp_cache))
672 {
673 struct arp_cache_entry *ac = LIST_FIRST(&pData->arp_cache);
674 LIST_REMOVE(ac, list);
675 RTMemFree(ac);
676 }
677 bootp_dhcp_fini(pData);
678#ifdef RT_OS_WINDOWS
679 WSACleanup();
680#endif
681#ifdef LOG_ENABLED
682 Log(("\n"
683 "NAT statistics\n"
684 "--------------\n"
685 "\n"));
686 ipstats(pData);
687 tcpstats(pData);
688 udpstats(pData);
689 icmpstats(pData);
690 mbufstats(pData);
691 sockstats(pData);
692 Log(("\n"
693 "\n"
694 "\n"));
695#endif
696 RTMemFree(pData);
697}
698
699
700#define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
701#define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
702
703/*
704 * curtime kept to an accuracy of 1ms
705 */
706static void updtime(PNATState pData)
707{
708#ifdef RT_OS_WINDOWS
709 struct _timeb tb;
710
711 _ftime(&tb);
712 curtime = (u_int)tb.time * (u_int)1000;
713 curtime += (u_int)tb.millitm;
714#else
715 gettimeofday(&tt, 0);
716
717 curtime = (u_int)tt.tv_sec * (u_int)1000;
718 curtime += (u_int)tt.tv_usec / (u_int)1000;
719
720 if ((tt.tv_usec % 1000) >= 500)
721 curtime++;
722#endif
723}
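/*
 * Example for the gettimeofday() branch above: with tv_sec = 10 and
 * tv_usec = 1500 the code yields curtime = 10 * 1000 + 1500 / 1000 = 10001,
 * and since the 500us remainder is >= 500 it is rounded up to 10002 ms.
 */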
724
725#ifdef RT_OS_WINDOWS
726void slirp_select_fill(PNATState pData, int *pnfds)
727#else /* RT_OS_WINDOWS */
728void slirp_select_fill(PNATState pData, int *pnfds, struct pollfd *polls)
729#endif /* !RT_OS_WINDOWS */
730{
731 struct socket *so, *so_next;
732 int nfds;
733#if defined(RT_OS_WINDOWS)
734 int rc;
735 int error;
736#else
737 int poll_index = 0;
738#endif
739 int i;
740
741 STAM_PROFILE_START(&pData->StatFill, a);
742
743 nfds = *pnfds;
744
745 /*
746 * First, TCP sockets
747 */
748 do_slowtimo = 0;
749 if (!link_up)
750 goto done;
751 /*
752 * *_slowtimo needs calling if there are IP fragments
753 * in the fragment queue, or there are TCP connections active
754 */
755 /* XXX:
756 * triggering of fragment expiration should stay the same but use the new macros
757 */
758 do_slowtimo = (tcb.so_next != &tcb);
759 if (!do_slowtimo)
760 {
761 for (i = 0; i < IPREASS_NHASH; i++)
762 {
763 if (!TAILQ_EMPTY(&ipq[i]))
764 {
765 do_slowtimo = 1;
766 break;
767 }
768 }
769 }
770 ICMP_ENGAGE_EVENT(&pData->icmp_socket, readfds);
771
772 STAM_COUNTER_RESET(&pData->StatTCP);
773 STAM_COUNTER_RESET(&pData->StatTCPHot);
774
775 QSOCKET_FOREACH(so, so_next, tcp)
776 /* { */
777#if !defined(RT_OS_WINDOWS)
778 so->so_poll_index = -1;
779#endif
780 STAM_COUNTER_INC(&pData->StatTCP);
781
782 /*
783 * See if we need a tcp_fasttimo
784 */
785 if ( time_fasttimo == 0
786 && so->so_tcpcb != NULL
787 && so->so_tcpcb->t_flags & TF_DELACK)
788 time_fasttimo = curtime; /* Flag when we want a fasttimo */
789
790 /*
791 * NOFDREF can include still connecting to local-host,
792 * newly socreated() sockets etc. Don't want to select these.
793 */
794 if (so->so_state & SS_NOFDREF || so->s == -1)
795 CONTINUE(tcp);
796
797 /*
798 * Set for reading sockets which are accepting
799 */
800 if (so->so_state & SS_FACCEPTCONN)
801 {
802 STAM_COUNTER_INC(&pData->StatTCPHot);
803 TCP_ENGAGE_EVENT1(so, readfds);
804 CONTINUE(tcp);
805 }
806
807 /*
808 * Set for writing sockets which are connecting
809 */
810 if (so->so_state & SS_ISFCONNECTING)
811 {
812 Log2(("connecting %R[natsock] engaged\n",so));
813 STAM_COUNTER_INC(&pData->StatTCPHot);
814 TCP_ENGAGE_EVENT1(so, writefds);
815 }
816
817 /*
818 * Set for writing if we are connected, can send more, and
819 * we have something to send
820 */
821 if (CONN_CANFSEND(so) && so->so_rcv.sb_cc)
822 {
823 STAM_COUNTER_INC(&pData->StatTCPHot);
824 TCP_ENGAGE_EVENT1(so, writefds);
825 }
826
827 /*
828 * Set for reading (and urgent data) if we are connected, can
829 * receive more, and we have room for it XXX /2 ?
830 */
831 if (CONN_CANFRCV(so) && (so->so_snd.sb_cc < (so->so_snd.sb_datalen/2)))
832 {
833 STAM_COUNTER_INC(&pData->StatTCPHot);
834 TCP_ENGAGE_EVENT2(so, readfds, xfds);
835 }
836 LOOP_LABEL(tcp, so, so_next);
837 }
838
839 /*
840 * UDP sockets
841 */
842 STAM_COUNTER_RESET(&pData->StatUDP);
843 STAM_COUNTER_RESET(&pData->StatUDPHot);
844
845 QSOCKET_FOREACH(so, so_next, udp)
846 /* { */
847
848 STAM_COUNTER_INC(&pData->StatUDP);
849#if !defined(RT_OS_WINDOWS)
850 so->so_poll_index = -1;
851#endif
852
853 /*
854 * See if it's timed out
855 */
856 if (so->so_expire)
857 {
858 if (so->so_expire <= curtime)
859 {
860 Log2(("NAT: %R[natsock] expired\n", so));
861 if (so->so_timeout != NULL)
862 {
863 so->so_timeout(pData, so, so->so_timeout_arg);
864 }
865#ifdef VBOX_WITH_SLIRP_MT
866 /* we need so_next to continue our loop */
867 so_next = so->so_next;
868#endif
869 UDP_DETACH(pData, so, so_next);
870 CONTINUE_NO_UNLOCK(udp);
871 }
872 else
873 do_slowtimo = 1; /* Let socket expire */
874 }
875
876 /*
877 * When UDP packets are received from over the link, they're
878 * sendto()'d straight away, so no need for setting for writing
879 * Limit the number of packets queued by this session to 4.
880 * Note that even though we try and limit this to 4 packets,
881 * the session could have more queued if the packets needed
882 * to be fragmented.
883 *
884 * (XXX <= 4 ?)
885 */
886 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4)
887 {
888 STAM_COUNTER_INC(&pData->StatUDPHot);
889 UDP_ENGAGE_EVENT(so, readfds);
890 }
891 LOOP_LABEL(udp, so, so_next);
892 }
893done:
894
895#if defined(RT_OS_WINDOWS)
896 *pnfds = VBOX_EVENT_COUNT;
897#else /* RT_OS_WINDOWS */
898 AssertRelease(poll_index <= *pnfds);
899 *pnfds = poll_index;
900#endif /* !RT_OS_WINDOWS */
901
902 STAM_PROFILE_STOP(&pData->StatFill, a);
903}
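/*
 * On return *pnfds holds the number of pollfd entries filled in on Unix hosts
 * (the caller passes the array capacity in), while on Windows it is simply set
 * to VBOX_EVENT_COUNT since all sockets share VBOX_SOCKET_EVENT.
 */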
904
905#if defined(RT_OS_WINDOWS)
906void slirp_select_poll(PNATState pData, int fTimeout, int fIcmp)
907#else /* RT_OS_WINDOWS */
908void slirp_select_poll(PNATState pData, struct pollfd *polls, int ndfs)
909#endif /* !RT_OS_WINDOWS */
910{
911 struct socket *so, *so_next;
912 int ret;
913#if defined(RT_OS_WINDOWS)
914 WSANETWORKEVENTS NetworkEvents;
915 int rc;
916 int error;
917#else
918 int poll_index = 0;
919#endif
920
921 STAM_PROFILE_START(&pData->StatPoll, a);
922
923 /* Update time */
924 updtime(pData);
925
926 /*
927 * See if anything has timed out
928 */
929 if (link_up)
930 {
931 if (time_fasttimo && ((curtime - time_fasttimo) >= 2))
932 {
933 STAM_PROFILE_START(&pData->StatFastTimer, a);
934 tcp_fasttimo(pData);
935 time_fasttimo = 0;
936 STAM_PROFILE_STOP(&pData->StatFastTimer, a);
937 }
938 if (do_slowtimo && ((curtime - last_slowtimo) >= 499))
939 {
940 STAM_PROFILE_START(&pData->StatSlowTimer, a);
941 ip_slowtimo(pData);
942 tcp_slowtimo(pData);
943 last_slowtimo = curtime;
944 STAM_PROFILE_STOP(&pData->StatSlowTimer, a);
945 }
946 }
947#if defined(RT_OS_WINDOWS)
948 if (fTimeout)
949 return; /* only timer update */
950#endif
951
952 /*
953 * Check sockets
954 */
955 if (!link_up)
956 goto done;
957#if defined(RT_OS_WINDOWS)
958 /* XXX: before renaming, please see the fIcmp define
959 * in slirp_state.h
960 */
961 if (fIcmp)
962 sorecvfrom(pData, &pData->icmp_socket);
963#else
964 if ( (pData->icmp_socket.s != -1)
965 && CHECK_FD_SET(&pData->icmp_socket, ignored, readfds))
966 sorecvfrom(pData, &pData->icmp_socket);
967#endif
968 /*
969 * Check TCP sockets
970 */
971 QSOCKET_FOREACH(so, so_next, tcp)
972 /* { */
973
974#ifdef VBOX_WITH_SLIRP_MT
975 if ( so->so_state & SS_NOFDREF
976 && so->so_deleted == 1)
977 {
978 struct socket *son, *sop = NULL;
979 QSOCKET_LOCK(tcb);
980 if (so->so_next != NULL)
981 {
982 if (so->so_next != &tcb)
983 SOCKET_LOCK(so->so_next);
984 son = so->so_next;
985 }
986 if ( so->so_prev != &tcb
987 && so->so_prev != NULL)
988 {
989 SOCKET_LOCK(so->so_prev);
990 sop = so->so_prev;
991 }
992 QSOCKET_UNLOCK(tcb);
993 remque(pData, so);
994 NSOCK_DEC();
995 SOCKET_UNLOCK(so);
996 SOCKET_LOCK_DESTROY(so);
997 RTMemFree(so);
998 so_next = son;
999 if (sop != NULL)
1000 SOCKET_UNLOCK(sop);
1001 CONTINUE_NO_UNLOCK(tcp);
1002 }
1003#endif
1004 /*
1005 * FD_ISSET is meaningless on these sockets
1006 * (and they can crash the program)
1007 */
1008 if (so->so_state & SS_NOFDREF || so->s == -1)
1009 CONTINUE(tcp);
1010
1011 POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);
1012
1013 LOG_NAT_SOCK(so, TCP, &NetworkEvents, readfds, writefds, xfds);
1014
1015
1016 /*
1017 * Check for URG data
1018 * This will soread as well, so no need to
1019 * test for readfds below if this succeeds
1020 */
1021
1022 /* out-of-band data */
1023 if (CHECK_FD_SET(so, NetworkEvents, xfds))
1024 {
1025 sorecvoob(pData, so);
1026 }
1027
1028 /*
1029 * Check sockets for reading
1030 */
1031 else if ( CHECK_FD_SET(so, NetworkEvents, readfds)
1032 || WIN_CHECK_FD_SET(so, NetworkEvents, acceptds))
1033 {
1034 /*
1035 * Check for incoming connections
1036 */
1037 if (so->so_state & SS_FACCEPTCONN)
1038 {
1039 TCP_CONNECT(pData, so);
1040#if defined(RT_OS_WINDOWS)
1041 if (!(NetworkEvents.lNetworkEvents & FD_CLOSE))
1042#endif
1043 CONTINUE(tcp);
1044 }
1045
1046 ret = soread(pData, so);
1047 /* Output it if we read something */
1048 if (RT_LIKELY(ret > 0))
1049 TCP_OUTPUT(pData, sototcpcb(so));
1050 }
1051
1052#if defined(RT_OS_WINDOWS)
1053 /*
1054 * Check for FD_CLOSE events.
1055 * In some cases FD_CLOSE, once raised on a socket, may only be delivered later (for some reason).
1056 */
1057 if ( (NetworkEvents.lNetworkEvents & FD_CLOSE)
1058 || (so->so_close == 1))
1059 {
1060 so->so_close = 1; /* mark it */
1061 /*
1062 * drain the socket
1063 */
1064 for (;;)
1065 {
1066 ret = soread(pData, so);
1067 if (ret > 0)
1068 TCP_OUTPUT(pData, sototcpcb(so));
1069 else
1070 break;
1071 }
1072 CONTINUE(tcp);
1073 }
1074#endif
1075
1076 /*
1077 * Check sockets for writing
1078 */
1079 if (CHECK_FD_SET(so, NetworkEvents, writefds))
1080 {
1081 /*
1082 * Check for non-blocking, still-connecting sockets
1083 */
1084 if (so->so_state & SS_ISFCONNECTING)
1085 {
1086 Log2(("connecting %R[natsock] catched\n", so));
1087 /* Connected */
1088 so->so_state &= ~SS_ISFCONNECTING;
1089
1090 /*
1091 * This should be probably guarded by PROBE_CONN too. Anyway,
1092 * we disable it on OS/2 because the below send call returns
1093 * EFAULT which causes the opened TCP socket to close right
1094 * after it has been opened and connected.
1095 */
1096#ifndef RT_OS_OS2
1097 ret = send(so->s, (const char *)&ret, 0, 0);
1098 if (ret < 0)
1099 {
1100 /* XXXXX Must fix, zero bytes is a NOP */
1101 if ( errno == EAGAIN
1102 || errno == EWOULDBLOCK
1103 || errno == EINPROGRESS
1104 || errno == ENOTCONN)
1105 CONTINUE(tcp);
1106
1107 /* else failed */
1108 so->so_state = SS_NOFDREF;
1109 }
1110 /* else so->so_state &= ~SS_ISFCONNECTING; */
1111#endif
1112
1113 /*
1114 * Continue tcp_input
1115 */
1116 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1117 /* continue; */
1118 }
1119 else
1120 SOWRITE(ret, pData, so);
1121 /*
1122 * XXX If we wrote something (a lot), there could be the need
1123 * for a window update. In the worst case, the remote will send
1124 * a window probe to get things going again.
1125 */
1126 }
1127
1128 /*
1129 * Probe a still-connecting, non-blocking socket
1130 * to check if it's still alive
1131 */
1132#ifdef PROBE_CONN
1133 if (so->so_state & SS_ISFCONNECTING)
1134 {
1135 ret = recv(so->s, (char *)&ret, 0, 0);
1136
1137 if (ret < 0)
1138 {
1139 /* XXX */
1140 if ( errno == EAGAIN
1141 || errno == EWOULDBLOCK
1142 || errno == EINPROGRESS
1143 || errno == ENOTCONN)
1144 {
1145 CONTINUE(tcp); /* Still connecting, continue */
1146 }
1147
1148 /* else failed */
1149 so->so_state = SS_NOFDREF;
1150
1151 /* tcp_input will take care of it */
1152 }
1153 else
1154 {
1155 ret = send(so->s, &ret, 0, 0);
1156 if (ret < 0)
1157 {
1158 /* XXX */
1159 if ( errno == EAGAIN
1160 || errno == EWOULDBLOCK
1161 || errno == EINPROGRESS
1162 || errno == ENOTCONN)
1163 {
1164 CONTINUE(tcp);
1165 }
1166 /* else failed */
1167 so->so_state = SS_NOFDREF;
1168 }
1169 else
1170 so->so_state &= ~SS_ISFCONNECTING;
1171
1172 }
1173 TCP_INPUT((struct mbuf *)NULL, sizeof(struct ip),so);
1174 } /* SS_ISFCONNECTING */
1175#endif
1176#ifndef RT_OS_WINDOWS
1177 if ( UNIX_CHECK_FD_SET(so, NetworkEvents, rdhup)
1178 || UNIX_CHECK_FD_SET(so, NetworkEvents, rderr))
1179 {
1180 int err;
1181 int inq, outq;
1182 int status;
1183 socklen_t optlen = sizeof(int);
1184 inq = outq = 0;
1185 status = getsockopt(so->s, SOL_SOCKET, SO_ERROR, &err, &optlen);
1186 if (status != 0)
1187 Log(("NAT: can't get error status from %R[natsock]\n", so));
1188#ifndef RT_OS_SOLARIS
1189 status = ioctl(so->s, FIONREAD, &inq); /* tcp(7) recommends SIOCINQ which is Linux specific */
1190 if (status != 0 && errno != EINVAL)
1191 {
1192 /* EINVAL is returned if the socket is in the listen state, see tcp(7) */
1193 Log(("NAT: can't get depth of IN queue status from %R[natsock]\n", so));
1194 }
1195 status = ioctl(so->s, TIOCOUTQ, &outq); /* SIOCOUTQ see previous comment */
1196 if (status != 0)
1197 Log(("NAT: can't get depth of OUT queue from %R[natsock]\n", so));
1198#else
1199 /*
1200 * Solaris has slightly different ioctl commands and handling;
1201 * hint: streamio(7) I_NREAD
1202 */
1203#endif
1204 if ( so->so_state & SS_ISFCONNECTING
1205 || UNIX_CHECK_FD_SET(so, NetworkEvents, readfds))
1206 {
1207 /**
1208 * Check whether we need to take care of graceful connection termination here.
1209 * @todo try with proxy server
1210 */
1211 if (UNIX_CHECK_FD_SET(so, NetworkEvents, readfds))
1212 {
1213 /*
1214 * We have never seen inq != 0 or outq != 0 here; let the check stay for a while
1215 * so that we can detect it if it ever happens.
1216 * Let the TCP/IP stack wait for the socket or expire it.
1217 */
1218 Log(("NAT: %R[natsock] err(%d:%s) s(in:%d,out:%d)happens on read I/O, "
1219 "other side close connection \n", so, err, strerror(err), inq, outq));
1220 CONTINUE(tcp);
1221 }
1222 goto tcp_input_close;
1223 }
1224 if ( !UNIX_CHECK_FD_SET(so, NetworkEvents, readfds)
1225 && !UNIX_CHECK_FD_SET(so, NetworkEvents, writefds)
1226 && !UNIX_CHECK_FD_SET(so, NetworkEvents, xfds))
1227 {
1228 Log(("NAT: system expires the socket %R[natsock] err(%d:%s) s(in:%d,out:%d) happens on non-I/O. ",
1229 so, err, strerror(err), inq, outq));
1230 goto tcp_input_close;
1231 }
1232 Log(("NAT: %R[natsock] we've met(%d:%s) s(in:%d, out:%d) unhandled combination hup (%d) "
1233 "rederr(%d) on (r:%d, w:%d, x:%d)\n",
1234 so, err, strerror(err),
1235 inq, outq,
1236 UNIX_CHECK_FD_SET(so, ign, rdhup),
1237 UNIX_CHECK_FD_SET(so, ign, rderr),
1238 UNIX_CHECK_FD_SET(so, ign, readfds),
1239 UNIX_CHECK_FD_SET(so, ign, writefds),
1240 UNIX_CHECK_FD_SET(so, ign, xfds)));
1241 /*
1242 * Give OS's TCP/IP stack a chance to resolve an issue or expire the socket.
1243 */
1244 CONTINUE(tcp);
1245tcp_input_close:
1246 so->so_state = SS_NOFDREF; /* causes proper TCP connection termination and socket closing */
1247 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1248 CONTINUE(tcp);
1249 }
1250#endif
1251 LOOP_LABEL(tcp, so, so_next);
1252 }
1253
1254 /*
1255 * Now UDP sockets.
1256 * Incoming packets are sent straight away, they're not buffered.
1257 * Incoming UDP data isn't buffered either.
1258 */
1259 QSOCKET_FOREACH(so, so_next, udp)
1260 /* { */
1261#ifdef VBOX_WITH_SLIRP_MT
1262 if ( so->so_state & SS_NOFDREF
1263 && so->so_deleted == 1)
1264 {
1265 struct socket *son, *sop = NULL;
1266 QSOCKET_LOCK(udb);
1267 if (so->so_next != NULL)
1268 {
1269 if (so->so_next != &udb)
1270 SOCKET_LOCK(so->so_next);
1271 son = so->so_next;
1272 }
1273 if ( so->so_prev != &udb
1274 && so->so_prev != NULL)
1275 {
1276 SOCKET_LOCK(so->so_prev);
1277 sop = so->so_prev;
1278 }
1279 QSOCKET_UNLOCK(udb);
1280 remque(pData, so);
1281 NSOCK_DEC();
1282 SOCKET_UNLOCK(so);
1283 SOCKET_LOCK_DESTROY(so);
1284 RTMemFree(so);
1285 so_next = son;
1286 if (sop != NULL)
1287 SOCKET_UNLOCK(sop);
1288 CONTINUE_NO_UNLOCK(udp);
1289 }
1290#endif
1291 POLL_UDP_EVENTS(rc, error, so, &NetworkEvents);
1292
1293 LOG_NAT_SOCK(so, UDP, &NetworkEvents, readfds, writefds, xfds);
1294
1295 if (so->s != -1 && CHECK_FD_SET(so, NetworkEvents, readfds))
1296 {
1297 SORECVFROM(pData, so);
1298 }
1299 LOOP_LABEL(udp, so, so_next);
1300 }
1301
1302done:
1303#ifndef VBOX_WITH_SLIRP_MT
1304 /*
1305 * See if we can start outputting
1306 */
1307 if (if_queued && link_up)
1308 if_start(pData);
1309#endif
1310
1311 STAM_PROFILE_STOP(&pData->StatPoll, a);
1312}
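/*
 * A minimal sketch of the Unix-side polling loop a consumer might run around
 * these two functions (cPollCap and the array size are illustrative, not part
 * of this file; a real caller would also handle a 0 ms timeout and the
 * Windows event variant):
 *
 *     struct pollfd aPolls[128];
 *     int cPollCap = 128;
 *     for (;;)
 *     {
 *         int nfds = cPollCap;
 *         slirp_select_fill(pData, &nfds, aPolls);
 *         poll(aPolls, nfds, slirp_get_timeout_ms(pData));
 *         slirp_select_poll(pData, aPolls, nfds);
 *     }
 */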
1313
1314
1315struct arphdr
1316{
1317 unsigned short ar_hrd; /* format of hardware address */
1318 unsigned short ar_pro; /* format of protocol address */
1319 unsigned char ar_hln; /* length of hardware address */
1320 unsigned char ar_pln; /* length of protocol address */
1321 unsigned short ar_op; /* ARP opcode (command) */
1322
1323 /*
1324 * Ethernet looks like this : This bit is variable sized however...
1325 */
1326 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
1327 unsigned char ar_sip[4]; /* sender IP address */
1328 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
1329 unsigned char ar_tip[4]; /* target IP address */
1330};
1331AssertCompileSize(struct arphdr, 28);
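/* The compile-time size check works out as 2+2+1+1+2 bytes of fixed header
 * plus 6+4+6+4 bytes of addresses (ETH_ALEN == 6), i.e. 28 bytes total. */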
1332
1333static void arp_input(PNATState pData, struct mbuf *m)
1334{
1335 struct ethhdr *eh;
1336 struct ethhdr *reh;
1337 struct arphdr *ah;
1338 struct arphdr *rah;
1339 int ar_op;
1340 struct ex_list *ex_ptr;
1341 uint32_t htip;
1342 uint32_t tip;
1343 struct mbuf *mr;
1344 eh = mtod(m, struct ethhdr *);
1345 ah = (struct arphdr *)&eh[1];
1346 htip = ntohl(*(uint32_t*)ah->ar_tip);
1347 tip = *(uint32_t*)ah->ar_tip;
1348
1349
1350 ar_op = ntohs(ah->ar_op);
1351 switch(ar_op)
1352 {
1353 case ARPOP_REQUEST:
1354 mr = m_get(pData);
1355
1356 reh = mtod(mr, struct ethhdr *);
1357 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1358 Log4(("NAT: arp:%R[ether]->%R[ether]\n",
1359 reh->h_source, reh->h_dest));
1360 Log4(("NAT: arp: %R[IP4]\n", &tip));
1361
1362 mr->m_data += if_maxlinkhdr;
1363 mr->m_len = sizeof(struct arphdr);
1364 rah = mtod(mr, struct arphdr *);
1365#ifdef VBOX_WITH_NAT_SERVICE
1366 if (tip == special_addr.s_addr) goto arp_ok;
1367#endif
1368 if ((htip & pData->netmask) == ntohl(special_addr.s_addr))
1369 {
1370 if ( CTL_CHECK(htip, CTL_DNS)
1371 || CTL_CHECK(htip, CTL_ALIAS)
1372 || CTL_CHECK(htip, CTL_TFTP))
1373 goto arp_ok;
1374 for (ex_ptr = exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next)
1375 {
1376 if ((htip & ~pData->netmask) == ex_ptr->ex_addr)
1377 {
1378 goto arp_ok;
1379 }
1380 }
1381 return;
1382 arp_ok:
1383 rah->ar_hrd = htons(1);
1384 rah->ar_pro = htons(ETH_P_IP);
1385 rah->ar_hln = ETH_ALEN;
1386 rah->ar_pln = 4;
1387 rah->ar_op = htons(ARPOP_REPLY);
1388 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN);
1389
1390 switch (htip & ~pData->netmask)
1391 {
1392 case CTL_DNS:
1393 case CTL_ALIAS:
1394 rah->ar_sha[5] = (uint8_t)(htip & ~pData->netmask);
1395 break;
1396 default:;
1397 }
1398
1399 memcpy(rah->ar_sip, ah->ar_tip, 4);
1400 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
1401 memcpy(rah->ar_tip, ah->ar_sip, 4);
1402 if_encap(pData, ETH_P_ARP, mr);
1403 m_free(pData, m);
1404 }
1405 /*Gratuitous ARP*/
1406 if ( *(uint32_t *)ah->ar_sip == *(uint32_t *)ah->ar_tip
1407 && memcmp(ah->ar_tha, broadcast_ethaddr, ETH_ALEN) == 0
1408 && memcmp(eh->h_dest, broadcast_ethaddr, ETH_ALEN) == 0)
1409 {
1410 /* we've received an announcement about an address assignment,
1411 * so let's update the ARP cache
1412 */
1413 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]) == 0)
1414 {
1415 m_free(pData, mr);
1416 m_free(pData, m);
1417 break;
1418 }
1419 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]);
1420 /* good opportunity to activate port-forwarding on address (self-)assignment */
1421 activate_port_forwarding(pData, eh);
1422 }
1423 break;
1424 case ARPOP_REPLY:
1425 {
1426 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]) == 0)
1427 {
1428 m_free(pData, m);
1429 break;
1430 }
1431 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_sip, ah->ar_sha);
1432 /* after a save/restore we need to bring port forwarding up again */
1433 if (pData->port_forwarding_activated == 0)
1434 activate_port_forwarding(pData, eh);
1435 m_free(pData, m);
1436 }
1437 break;
1438 default:
1439 break;
1440 }
1441}
1442
1443void slirp_input(PNATState pData, const uint8_t *pkt, int pkt_len)
1444{
1445 struct mbuf *m;
1446 int proto;
1447 static bool fWarnedIpv6;
1448 struct ethhdr *eh = (struct ethhdr*)pkt;
1449
1450 Log2(("NAT: slirp_input %d\n", pkt_len));
1451 if (pkt_len < ETH_HLEN)
1452 {
1453 LogRel(("NAT: packet having size %d has been ingnored\n", pkt_len));
1454 return;
1455 }
1456 Log4(("NAT: in:%R[ether]->%R[ether]\n", &eh->h_source, &eh->h_dest));
1457
1458 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) == 0)
1459 {
1460 /* @todo vasily: add ether logging routine in debug.c */
1461 Log(("NAT: packet was addressed to other MAC\n"));
1462 RTMemFree((void *)pkt);
1463 return;
1464 }
1465
1466 m = m_get(pData);
1467 if (!m)
1468 {
1469 LogRel(("NAT: can't allocate new mbuf\n"));
1470 return;
1471 }
1472
1473 /* Note: we add to align the IP header */
1474
1475 if (M_FREEROOM(m) < pkt_len)
1476 m_inc(m, pkt_len);
1477
1478 m->m_len = pkt_len ;
1479 memcpy(m->m_data, pkt, pkt_len);
1480
1481#if 1
1482 if (pData->port_forwarding_activated == 0)
1483 activate_port_forwarding(pData, mtod(m, struct ethhdr *));
1484#endif
1485
1486 proto = ntohs(*(uint16_t *)(pkt + 12));
1487 switch(proto)
1488 {
1489 case ETH_P_ARP:
1490 arp_input(pData, m);
1491 break;
1492 case ETH_P_IP:
1493 /* Update time. Important if the network is very quiet, as otherwise
1494 * the first outgoing connection gets an incorrect timestamp. */
1495 updtime(pData);
1496 m_adj(m, ETH_HLEN);
1497 ip_input(pData, m);
1498 break;
1499 case ETH_P_IPV6:
1500 m_free(pData, m);
1501 if (!fWarnedIpv6)
1502 {
1503 LogRel(("NAT: IPv6 not supported\n"));
1504 fWarnedIpv6 = true;
1505 }
1506 break;
1507 default:
1508 Log(("NAT: Unsupported protocol %x\n", proto));
1509 m_free(pData, m);
1510 break;
1511 }
1512 RTMemFree((void *)pkt);
1513}
1514
1515/* output the IP packet to the ethernet device */
1516void if_encap(PNATState pData, uint16_t eth_proto, struct mbuf *m)
1517{
1518 struct ethhdr *eh;
1519 uint8_t *buf = NULL;
1520 STAM_PROFILE_START(&pData->StatIF_encap, a);
1521
1522 m->m_data -= if_maxlinkhdr;
1523 m->m_len += ETH_HLEN;
1524 eh = mtod(m, struct ethhdr *);
1525
1526 if(MBUF_HEAD(m) != m->m_data)
1527 {
1528 LogRel(("NAT: ethernet detects corruption of the packet"));
1529 AssertMsgFailed(("!!Ethernet frame corrupted!!"));
1530 }
1531
1532 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) != 0)
1533 {
1534 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
1535 memcpy(eh->h_source, special_ethaddr, ETH_ALEN);
1536 Assert(memcmp(eh->h_dest, special_ethaddr, ETH_ALEN) != 0);
1537 if (memcmp(eh->h_dest, zerro_ethaddr, ETH_ALEN) == 0)
1538 {
1539 /* don't do anything */
1540 goto done;
1541 }
1542 }
1543 buf = RTMemAlloc(1600);
1544 if (buf == NULL)
1545 {
1546 LogRel(("NAT: Can't alloc memory for outgoing buffer\n"));
1547 goto done;
1548 }
1549 eh->h_proto = htons(eth_proto);
1550 memcpy(buf, mtod(m, uint8_t *), m->m_len);
1551 slirp_output(pData->pvUser, NULL, buf, m->m_len);
1552done:
1553 STAM_PROFILE_STOP(&pData->StatIF_encap, a);
1554 m_free(pData, m);
1555}
1556
1557/**
1558 * We still use the DHCP server leases to map an Ethernet address to an IP address.
1559 * @todo see rt_lookup_in_cache
1560 */
1561static uint32_t find_guest_ip(PNATState pData, const uint8_t *eth_addr)
1562{
1563 uint32_t ip = INADDR_ANY;
1564 if (eth_addr == NULL)
1565 goto done;
1566 if (memcmp(eth_addr, zerro_ethaddr, ETH_ALEN) == 0
1567 || memcmp(eth_addr, broadcast_ethaddr, ETH_ALEN) == 0)
1568 goto done;
1569 if(slirp_arp_lookup_ip_by_ether(pData, eth_addr, &ip) == 0)
1570 goto done;
1571 bootp_cache_lookup_ip_by_ether(pData, eth_addr, &ip);
1572done:
1573 return ip;
1574}
1575
1576/**
1577 * We need to check whether port forwarding has already been activated
1578 * for a specific machine; that of course only matters in
1579 * service mode.
1580 * @todo finish this for service case
1581 */
1582static void activate_port_forwarding(PNATState pData, struct ethhdr *ethdr)
1583{
1584 struct port_forward_rule *rule = NULL;
1585
1586 pData->port_forwarding_activated = 1;
1587 /* check mac here */
1588 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
1589 {
1590 struct socket *so;
1591 struct alias_link *link;
1592 struct libalias *lib;
1593 int flags;
1594 struct sockaddr sa;
1595 struct sockaddr_in *psin;
1596 socklen_t socketlen;
1597 struct in_addr alias;
1598 int rc;
1599 uint32_t guest_addr; /* check whether we have already handed an address to the guest */
1600
1601 if (rule->activated)
1602 continue; /*already activated */
1603#ifdef VBOX_WITH_NAT_SERVICE
1604 if (memcmp(rule->mac_address, ethdr->h_source, ETH_ALEN) != 0)
1605 continue; /* not the right MAC; @todo it'd be better to keep the port-forwarding list per MAC */
1606 guest_addr = find_guest_ip(pData, ethdr->h_source);
1607#else
1608#if 0
1609 if (memcmp(client_ethaddr, ethdr->h_source, ETH_ALEN) != 0)
1610 continue;
1611#endif
1612 guest_addr = find_guest_ip(pData, ethdr->h_source);
1613#endif
1614 if (guest_addr == INADDR_ANY)
1615 {
1616 /* the address wasn't granted */
1617 pData->port_forwarding_activated = 0;
1618 return;
1619 }
1620#if defined(DEBUG_vvl) && !defined(VBOX_WITH_NAT_SERVICE)
1621 Assert(rule->guest_addr.s_addr == guest_addr);
1622#endif
1623
1624 LogRel(("NAT: set redirect %s hp:%d gp:%d\n", (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1625 rule->host_port, rule->guest_port));
1626 if (rule->proto == IPPROTO_UDP)
1627 {
1628 so = udp_listen(pData, rule->bind_ip.s_addr, htons(rule->host_port), guest_addr,
1629 htons(rule->guest_port), 0);
1630 }
1631 else
1632 {
1633 so = solisten(pData, rule->bind_ip.s_addr, htons(rule->host_port), guest_addr,
1634 htons(rule->guest_port), 0);
1635 }
1636 if (so == NULL)
1637 {
1638 LogRel(("NAT: failed redirect %s hp:%d gp:%d\n", (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1639 rule->host_port, rule->guest_port));
1640 goto remove_port_forwarding;
1641 }
1642
1643 psin = (struct sockaddr_in *)&sa;
1644 psin->sin_family = AF_INET;
1645 psin->sin_port = 0;
1646 psin->sin_addr.s_addr = INADDR_ANY;
1647 socketlen = sizeof(struct sockaddr);
1648
1649 rc = getsockname(so->s, &sa, &socketlen);
1650 if (rc < 0 || sa.sa_family != AF_INET)
1651 {
1652 LogRel(("NAT: failed redirect %s hp:%d gp:%d\n", (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1653 rule->host_port, rule->guest_port));
1654 goto remove_port_forwarding;
1655 }
1656
1657 psin = (struct sockaddr_in *)&sa;
1658
1659
1660 alias.s_addr = htonl(ntohl(guest_addr) | CTL_ALIAS);
1661 alias_init(pData, &so->so_la, PKT_ALIAS_REVERSE, alias);
1662 link = LibAliasRedirectPort(so->so_la, psin->sin_addr, htons(rule->host_port),
1663 alias, htons(rule->guest_port),
1664 special_addr, -1, /* not very clear for now*/
1665 rule->proto);
1666 if (link == NULL)
1667 {
1668 LogRel(("NAT: failed redirect %s hp:%d gp:%d\n", (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1669 rule->host_port, rule->guest_port));
1670 goto remove_port_forwarding;
1671 }
1672 rule->activated = 1;
1673 continue;
1674 remove_port_forwarding:
1675 LIST_REMOVE(rule, list);
1676 RTMemFree(rule);
1677 }
1678}
1679
1680/**
1681 * Starting with 3.1, instead of opening a new socket right away we first
1682 * gather more information:
1683 * 1. bind IP
1684 * 2. host port
1685 * 3. guest port
1686 * 4. proto
1687 * 5. guest MAC address
1688 * The guest's MAC address is rather important for the service case, but we can easily
1689 * get it from the VM configuration in DrvNAT or the NAT service; the idea is to activate
1690 * the corresponding port-forwarding rule.
1691 */
1692int slirp_redir(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1693 struct in_addr guest_addr, int guest_port, const uint8_t *ethaddr)
1694{
1695 struct port_forward_rule *rule = NULL;
1696 Assert(memcmp(ethaddr, zerro_ethaddr, ETH_ALEN) == 0);
1697 rule = RTMemAllocZ(sizeof(struct port_forward_rule));
1698 if (rule == NULL)
1699 return 1;
1700 rule->proto = (is_udp ? IPPROTO_UDP : IPPROTO_TCP);
1701 rule->host_port = host_port;
1702 rule->guest_port = guest_port;
1703#ifndef VBOX_WITH_NAT_SERVICE
1704 rule->guest_addr.s_addr = guest_addr.s_addr;
1705#endif
1706 rule->bind_ip.s_addr = host_addr.s_addr;
1707 memcpy(rule->mac_address, ethaddr, ETH_ALEN);
1708 /* @todo add mac address */
1709 LIST_INSERT_HEAD(&pData->port_forward_rule_head, rule, list);
1710 return 0;
1711}
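/*
 * Illustrative use (addresses and ports are made-up values): forwarding host
 * TCP port 2222 to port 22 of the guest at 10.0.2.15 could be requested as
 *
 *     struct in_addr hostAddr, guestAddr;
 *     inet_aton("127.0.0.1", &hostAddr);
 *     inet_aton("10.0.2.15", &guestAddr);
 *     slirp_redir(pData, 0, hostAddr, 2222, guestAddr, 22, zerro_ethaddr);
 *
 * where is_udp == 0 selects TCP (the Assert above expects the zero MAC in this
 * revision).  The rule only becomes live once activate_port_forwarding() has
 * run for the guest.
 */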
1712
1713int slirp_add_exec(PNATState pData, int do_pty, const char *args, int addr_low_byte,
1714 int guest_port)
1715{
1716 return add_exec(&exec_list, do_pty, (char *)args,
1717 addr_low_byte, htons(guest_port));
1718}
1719
1720void slirp_set_ethaddr(PNATState pData, const uint8_t *ethaddr)
1721{
1722#ifndef VBOX_WITH_NAT_SERVICE
1723 memcpy(client_ethaddr, ethaddr, ETH_ALEN);
1724#endif
1725}
1726
1727#if defined(RT_OS_WINDOWS)
1728HANDLE *slirp_get_events(PNATState pData)
1729{
1730 return pData->phEvents;
1731}
1732void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index)
1733{
1734 pData->phEvents[index] = hEvent;
1735}
1736#endif
1737
1738unsigned int slirp_get_timeout_ms(PNATState pData)
1739{
1740 if (link_up)
1741 {
1742 if (time_fasttimo)
1743 return 2;
1744 if (do_slowtimo)
1745 return 500; /* see PR_SLOWHZ */
1746 }
1747 return 0;
1748}
1749
1750#ifndef RT_OS_WINDOWS
1751int slirp_get_nsock(PNATState pData)
1752{
1753 return pData->nsock;
1754}
1755#endif
1756
1757/*
1758 * This function is called from the NAT thread.
1759 */
1760void slirp_post_sent(PNATState pData, void *pvArg)
1761{
1762 struct socket *so = 0;
1763 struct tcpcb *tp = 0;
1764 struct mbuf *m = (struct mbuf *)pvArg;
1765 m_free(pData, m);
1766}
1767#ifdef VBOX_WITH_SLIRP_MT
1768void slirp_process_queue(PNATState pData)
1769{
1770 RTReqProcess(pData->pReqQueue, RT_INDEFINITE_WAIT);
1771}
1772void *slirp_get_queue(PNATState pData)
1773{
1774 return pData->pReqQueue;
1775}
1776#endif
1777
1778void slirp_set_dhcp_TFTP_prefix(PNATState pData, const char *tftpPrefix)
1779{
1780 Log2(("tftp_prefix:%s\n", tftpPrefix));
1781 tftp_prefix = tftpPrefix;
1782}
1783
1784void slirp_set_dhcp_TFTP_bootfile(PNATState pData, const char *bootFile)
1785{
1786 Log2(("bootFile:%s\n", bootFile));
1787 bootp_filename = bootFile;
1788}
1789
1790void slirp_set_dhcp_next_server(PNATState pData, const char *next_server)
1791{
1792 Log2(("next_server:%s\n", next_server));
1793 if (next_server == NULL)
1794 pData->tftp_server.s_addr = htonl(ntohl(special_addr.s_addr) | CTL_TFTP);
1795 else
1796 inet_aton(next_server, &pData->tftp_server);
1797}
1798
1799int slirp_set_binding_address(PNATState pData, char *addr)
1800{
1801 if (addr == NULL || (inet_aton(addr, &pData->bindIP) == 0))
1802 {
1803 pData->bindIP.s_addr = INADDR_ANY;
1804 return 1;
1805 }
1806 return 0;
1807}
1808
1809void slirp_set_dhcp_dns_proxy(PNATState pData, bool fDNSProxy)
1810{
1811 Log2(("NAT: DNS proxy switched %s\n", (fDNSProxy ? "on" : "off")));
1812 pData->use_dns_proxy = fDNSProxy;
1813}
1814
1815#define CHECK_ARG(name, val, lim_min, lim_max) \
1816do { \
1817 if ((val) < (lim_min) || (val) > (lim_max)) \
1818 { \
1819 LogRel(("NAT: (" #name ":%d) has been ignored, " \
1820 "because out of range (%d, %d)\n", (val), (lim_min), (lim_max))); \
1821 return; \
1822 } \
1823 else \
1824 { \
1825 LogRel(("NAT: (" #name ":%d)\n", (val))); \
1826 } \
1827} while (0)
1828
1829/* don't allow the user to set values below 8 kB or above 1 MB */
1830#define _8K_1M_CHECK_ARG(name, val) CHECK_ARG(name, (val), 8, 1024)
1831void slirp_set_rcvbuf(PNATState pData, int kilobytes)
1832{
1833 _8K_1M_CHECK_ARG("SOCKET_RCVBUF", kilobytes);
1834 pData->socket_rcv = kilobytes;
1835}
1836void slirp_set_sndbuf(PNATState pData, int kilobytes)
1837{
1838 _8K_1M_CHECK_ARG("SOCKET_SNDBUF", kilobytes);
1839 pData->socket_snd = kilobytes * _1K;
1840}
1841void slirp_set_tcp_rcvspace(PNATState pData, int kilobytes)
1842{
1843 _8K_1M_CHECK_ARG("TCP_RCVSPACE", kilobytes);
1844 tcp_rcvspace = kilobytes * _1K;
1845}
1846void slirp_set_tcp_sndspace(PNATState pData, int kilobytes)
1847{
1848 _8K_1M_CHECK_ARG("TCP_SNDSPACE", kilobytes);
1849 tcp_sndspace = kilobytes * _1K;
1850}
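/*
 * For example, slirp_set_tcp_sndspace(pData, 64) configures a 64 KB TCP send
 * space; values outside the 8..1024 KB window are rejected by
 * _8K_1M_CHECK_ARG above and leave the previous setting untouched.
 */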
1851
1852/*
1853 * Looks up the Ethernet address for the given IP in the ARP cache.
1854 * Note: it is the caller's responsibility to allocate the buffer for the result.
1855 * @returns 0 - if found, 1 - otherwise
1856 */
1857int slirp_arp_lookup_ether_by_ip(PNATState pData, uint32_t ip, uint8_t *ether)
1858{
1859 struct arp_cache_entry *ac = NULL;
1860 int rc = 1;
1861 if (ether == NULL)
1862 return rc;
1863
1864 if (LIST_EMPTY(&pData->arp_cache))
1865 return rc;
1866
1867 LIST_FOREACH(ac, &pData->arp_cache, list)
1868 {
1869 if (ac->ip == ip)
1870 {
1871 memcpy(ether, ac->ether, ETH_ALEN);
1872 rc = 0;
1873 return rc;
1874 }
1875 }
1876 return rc;
1877}
1878
1879/*
1880 * Looks up the IP address for the given Ethernet address in the ARP cache.
1881 * Note: it is the caller's responsibility to allocate the buffer for the result.
1882 * @returns 0 - if found, 1 - otherwise
1883 */
1884int slirp_arp_lookup_ip_by_ether(PNATState pData, const uint8_t *ether, uint32_t *ip)
1885{
1886 struct arp_cache_entry *ac = NULL;
1887 int rc = 1;
1888 *ip = INADDR_ANY;
1889 if (LIST_EMPTY(&pData->arp_cache))
1890 return rc;
1891 LIST_FOREACH(ac, &pData->arp_cache, list)
1892 {
1893 if (memcmp(ether, ac->ether, ETH_ALEN) == 0) /* MAC addresses match */
1894 {
1895 *ip = ac->ip;
1896 rc = 0;
1897 return rc;
1898 }
1899 }
1900 return rc;
1901}
1902
1903void slirp_arp_who_has(PNATState pData, uint32_t dst)
1904{
1905 struct mbuf *m;
1906 struct ethhdr *ehdr;
1907 struct arphdr *ahdr;
1908
1909 m = m_get(pData);
1910 if (m == NULL)
1911 {
1912 LogRel(("NAT: Can't alloc mbuf for ARP request\n"));
1913 return;
1914 }
1915 ehdr = mtod(m, struct ethhdr *);
1916 memset(ehdr->h_source, 0xff, ETH_ALEN);
1917 ahdr = (struct arphdr *)&ehdr[1];
1918 ahdr->ar_hrd = htons(1);
1919 ahdr->ar_pro = htons(ETH_P_IP);
1920 ahdr->ar_hln = ETH_ALEN;
1921 ahdr->ar_pln = 4;
1922 ahdr->ar_op = htons(ARPOP_REQUEST);
1923 memcpy(ahdr->ar_sha, special_ethaddr, ETH_ALEN);
1924 *(uint32_t *)ahdr->ar_sip = htonl(ntohl(special_addr.s_addr) | CTL_ALIAS);
1925 memset(ahdr->ar_tha, 0xff, ETH_ALEN); /*broadcast*/
1926 *(uint32_t *)ahdr->ar_tip = dst;
1927 m->m_data += if_maxlinkhdr;
1928 m->m_len = sizeof(struct arphdr);
1929 if_encap(pData, ETH_P_ARP, m);
1930 LogRel(("NAT: ARP request sent\n"));
1931}
1932
1933/* Updates the ARP cache.
1934 * @returns 0 if the entry was found and updated,
1935 * 1 if it wasn't found.
1936 */
1937int slirp_arp_cache_update(PNATState pData, uint32_t dst, const uint8_t *mac)
1938{
1939 struct arp_cache_entry *ac;
1940 LIST_FOREACH(ac, &pData->arp_cache, list)
1941 {
1942 if (memcmp(ac->ether, mac, ETH_ALEN) == 0)
1943 {
1944 ac->ip = dst;
1945 return 0;
1946 }
1947 }
1948 return 1;
1949}
1950
1951void slirp_arp_cache_add(PNATState pData, uint32_t ip, const uint8_t *ether)
1952{
1953 struct arp_cache_entry *ac = NULL;
1954 ac = RTMemAllocZ(sizeof(struct arp_cache_entry));
1955 if (ac == NULL)
1956 {
1957 LogRel(("NAT: Can't allocate arp cache entry\n"));
1958 return;
1959 }
1960 ac->ip = ip;
1961 memcpy(ac->ether, ether, ETH_ALEN);
1962 LIST_INSERT_HEAD(&pData->arp_cache, ac, list);
1963}