
source: vbox/trunk/src/VBox/Devices/Network/slirp/slirp.c@ 30016

Last change on this file since 30016 was 30016, checked in by vboxsync, 15 years ago

NAT: clean up.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 61.7 KB
1/* $Id: slirp.c 30016 2010-06-03 18:31:14Z vboxsync $ */
2/** @file
3 * NAT - slirp glue.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * libslirp glue
22 *
23 * Copyright (c) 2004-2008 Fabrice Bellard
24 *
25 * Permission is hereby granted, free of charge, to any person obtaining a copy
26 * of this software and associated documentation files (the "Software"), to deal
27 * in the Software without restriction, including without limitation the rights
28 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
29 * copies of the Software, and to permit persons to whom the Software is
30 * furnished to do so, subject to the following conditions:
31 *
32 * The above copyright notice and this permission notice shall be included in
33 * all copies or substantial portions of the Software.
34 *
35 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
38 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
39 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
40 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
41 * THE SOFTWARE.
42 */
43
44#include "slirp.h"
45#ifdef RT_OS_OS2
46# include <paths.h>
47#endif
48
49#include <VBox/err.h>
50#include <VBox/pdmdrv.h>
51#include <iprt/assert.h>
52#include <iprt/file.h>
53#ifndef RT_OS_WINDOWS
54# include <sys/ioctl.h>
55# include <poll.h>
56#else
57# include <Winnls.h>
58# define _WINSOCK2API_
59# include <IPHlpApi.h>
60#endif
61#include <alias.h>
62
63#ifndef RT_OS_WINDOWS
64
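/*
 * Editor's note (not part of the original source): on non-Windows hosts the
 * DO_ENGAGE_EVENT* macros below register a socket in the pollfd array that
 * slirp_select_fill() hands to poll().  If the socket already owns a slot
 * (so_poll_index != -1 and the fd still matches) only the event mask is
 * extended; otherwise a fresh slot at poll_index is claimed and the index
 * advanced.
 */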
65# define DO_ENGAGE_EVENT1(so, fdset, label) \
66 do { \
67 if ( so->so_poll_index != -1 \
68 && so->s == polls[so->so_poll_index].fd) \
69 { \
70 polls[so->so_poll_index].events |= N_(fdset ## _poll); \
71 break; \
72 } \
73 AssertRelease(poll_index < (nfds)); \
74 AssertRelease(poll_index >= 0 && poll_index < (nfds)); \
75 polls[poll_index].fd = (so)->s; \
76 (so)->so_poll_index = poll_index; \
77 polls[poll_index].events = N_(fdset ## _poll); \
78 polls[poll_index].revents = 0; \
79 poll_index++; \
80 } while (0)
81
82# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
83 do { \
84 if ( so->so_poll_index != -1 \
85 && so->s == polls[so->so_poll_index].fd) \
86 { \
87 polls[so->so_poll_index].events |= \
88 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
89 break; \
90 } \
91 AssertRelease(poll_index < (nfds)); \
92 polls[poll_index].fd = (so)->s; \
93 (so)->so_poll_index = poll_index; \
94 polls[poll_index].events = \
95 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
96 poll_index++; \
97 } while (0)
98
99# define DO_POLL_EVENTS(rc, error, so, events, label) do {} while (0)
100
101/*
102 * DO_CHECK_FD_SET is used for dumping socket events, including POLLNVAL.
103 * gcc warns about attempts to log POLLNVAL, so the construction in the last
104 * two lines catches POLLNVAL while logging and returns false on error during
105 * normal usage.
106 */
107# define DO_CHECK_FD_SET(so, events, fdset) \
108 ( ((so)->so_poll_index != -1) \
109 && ((so)->so_poll_index <= ndfs) \
110 && ((so)->s == polls[so->so_poll_index].fd) \
111 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)) \
112 && ( N_(fdset ## _poll) == POLLNVAL \
113 || !(polls[(so)->so_poll_index].revents & POLLNVAL)))
114
115 /* specific for Unix API */
116# define DO_UNIX_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
117 /* specific for Windows Winsock API */
118# define DO_WIN_CHECK_FD_SET(so, events, fdset) 0
119
120# ifndef RT_OS_LINUX
121# define readfds_poll (POLLRDNORM)
122# define writefds_poll (POLLWRNORM)
123# else
124# define readfds_poll (POLLIN)
125# define writefds_poll (POLLOUT)
126# endif
127# define xfds_poll (POLLPRI)
128# define closefds_poll (POLLHUP)
129# define rderr_poll (POLLERR)
130# define rdhup_poll (POLLHUP)
131# define nval_poll (POLLNVAL)
132
133# define ICMP_ENGAGE_EVENT(so, fdset) \
134 do { \
135 if (pData->icmp_socket.s != -1) \
136 DO_ENGAGE_EVENT1((so), fdset, ICMP); \
137 } while (0)
138
139#else /* RT_OS_WINDOWS */
140
141/*
142 * On Windows, we will be notified by IcmpSendEcho2() when the response arrives.
143 * So no call to WSAEventSelect necessary.
144 */
145# define ICMP_ENGAGE_EVENT(so, fdset) do {} while (0)
146
147/*
148 * On Windows we use FD_ALL_EVENTS to ensure that we don't miss any event.
149 */
150# define DO_ENGAGE_EVENT1(so, fdset1, label) \
151 do { \
152 rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS); \
153 if (rc == SOCKET_ERROR) \
154 { \
155 /* This should not happen */ \
156 error = WSAGetLastError(); \
157 LogRel(("WSAEventSelect (" #label ") error %d (so=%x, socket=%s, event=%x)\n", \
158 error, (so), (so)->s, VBOX_SOCKET_EVENT)); \
159 } \
160 } while (0); \
161 CONTINUE(label)
162
163# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
164 DO_ENGAGE_EVENT1((so), (fdset1), label)
165
166# define DO_POLL_EVENTS(rc, error, so, events, label) \
167 (rc) = WSAEnumNetworkEvents((so)->s, VBOX_SOCKET_EVENT, (events)); \
168 if ((rc) == SOCKET_ERROR) \
169 { \
170 (error) = WSAGetLastError(); \
171 LogRel(("WSAEnumNetworkEvents " #label " error %d\n", (error))); \
172 CONTINUE(label); \
173 }
174
175# define acceptds_win FD_ACCEPT
176# define acceptds_win_bit FD_ACCEPT_BIT
177# define readfds_win FD_READ
178# define readfds_win_bit FD_READ_BIT
179# define writefds_win FD_WRITE
180# define writefds_win_bit FD_WRITE_BIT
181# define xfds_win FD_OOB
182# define xfds_win_bit FD_OOB_BIT
183# define closefds_win FD_CLOSE
184# define closefds_win_bit FD_CLOSE_BIT
185
189# define DO_CHECK_FD_SET(so, events, fdset) \
190 (((events).lNetworkEvents & fdset ## _win) && ((events).iErrorCode[fdset ## _win_bit] == 0))
191
192# define DO_WIN_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
193# define DO_UNIX_CHECK_FD_SET(so, events, fdset) 1 /*specific for Unix API */
194
195#endif /* RT_OS_WINDOWS */
196
197#define TCP_ENGAGE_EVENT1(so, fdset) \
198 DO_ENGAGE_EVENT1((so), fdset, tcp)
199
200#define TCP_ENGAGE_EVENT2(so, fdset1, fdset2) \
201 DO_ENGAGE_EVENT2((so), fdset1, fdset2, tcp)
202
203#define UDP_ENGAGE_EVENT(so, fdset) \
204 DO_ENGAGE_EVENT1((so), fdset, udp)
205
206#define POLL_TCP_EVENTS(rc, error, so, events) \
207 DO_POLL_EVENTS((rc), (error), (so), (events), tcp)
208
209#define POLL_UDP_EVENTS(rc, error, so, events) \
210 DO_POLL_EVENTS((rc), (error), (so), (events), udp)
211
212#define CHECK_FD_SET(so, events, set) \
213 (DO_CHECK_FD_SET((so), (events), set))
214
215#define WIN_CHECK_FD_SET(so, events, set) \
216 (DO_WIN_CHECK_FD_SET((so), (events), set))
217
218#define UNIX_CHECK_FD_SET(so, events, set) \
219 (DO_UNIX_CHECK_FD_SET(so, events, set))
220
221/*
222 * Logging macros
223 */
224#if VBOX_WITH_DEBUG_NAT_SOCKETS
225# if defined(RT_OS_WINDOWS)
226# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
227 do { \
228 LogRel((" " #proto " %R[natsock] %R[natwinnetevents]\n", (so), (winevent))); \
229 } while (0)
230# else /* !RT_OS_WINDOWS */
231# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
232 do { \
233 LogRel((" " #proto " %R[natsock] %s %s %s er: %s, %s, %s\n", (so), \
234 CHECK_FD_SET(so, ign, r_fdset) ? "READ":"", \
235 CHECK_FD_SET(so, ign, w_fdset) ? "WRITE":"", \
236 CHECK_FD_SET(so, ign, x_fdset) ? "OOB":"", \
237 CHECK_FD_SET(so, ign, rderr) ? "RDERR":"", \
238 CHECK_FD_SET(so, ign, rdhup) ? "RDHUP":"", \
239 CHECK_FD_SET(so, ign, nval) ? "RDNVAL":"")); \
240 } while (0)
241# endif /* !RT_OS_WINDOWS */
242#else /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
243# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) do {} while (0)
244#endif /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
245
246#define LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
247 DO_LOG_NAT_SOCK((so), proto, (winevent), r_fdset, w_fdset, x_fdset)
248
249static void activate_port_forwarding(PNATState, const uint8_t *pEther);
250
251static const uint8_t special_ethaddr[6] =
252{
253 0x52, 0x54, 0x00, 0x12, 0x35, 0x00
254};
255
256static const uint8_t broadcast_ethaddr[6] =
257{
258 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
259};
260
261const uint8_t zerro_ethaddr[6] =
262{
263 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
264};
265
266#ifdef RT_OS_WINDOWS
267static int get_dns_addr_domain(PNATState pData, bool fVerbose,
268 struct in_addr *pdns_addr,
269 const char **ppszDomain)
270{
271 ULONG flags = GAA_FLAG_INCLUDE_PREFIX; /*GAA_FLAG_INCLUDE_ALL_INTERFACES;*/ /* all interfaces registered in NDIS */
272 PIP_ADAPTER_ADDRESSES pAdapterAddr = NULL;
273 PIP_ADAPTER_ADDRESSES pAddr = NULL;
274 PIP_ADAPTER_DNS_SERVER_ADDRESS pDnsAddr = NULL;
275 ULONG size;
276 int wlen = 0;
277 char *pszSuffix;
278 struct dns_domain_entry *pDomain = NULL;
279 ULONG ret = ERROR_SUCCESS;
280
281 /* @todo add GAA_FLAG_SKIP_* flags to fetch only the required information */
282
283 /* determine size of buffer */
284 size = 0;
285 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
286 if (ret != ERROR_BUFFER_OVERFLOW)
287 {
288 LogRel(("NAT: error %lu occurred on capacity detection operation\n", ret));
289 return -1;
290 }
291 if (size == 0)
292 {
293 LogRel(("NAT: Win socket API returns non capacity\n"));
294 return -1;
295 }
296
297 pAdapterAddr = RTMemAllocZ(size);
298 if (!pAdapterAddr)
299 {
300 LogRel(("NAT: No memory available \n"));
301 return -1;
302 }
303 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
304 if (ret != ERROR_SUCCESS)
305 {
306 LogRel(("NAT: error %lu occurred on fetching adapters info\n", ret));
307 RTMemFree(pAdapterAddr);
308 return -1;
309 }
310
311 for (pAddr = pAdapterAddr; pAddr != NULL; pAddr = pAddr->Next)
312 {
313 int found;
314 if (pAddr->OperStatus != IfOperStatusUp)
315 continue;
316
317 for (pDnsAddr = pAddr->FirstDnsServerAddress; pDnsAddr != NULL; pDnsAddr = pDnsAddr->Next)
318 {
319 struct sockaddr *SockAddr = pDnsAddr->Address.lpSockaddr;
320 struct in_addr InAddr;
321 struct dns_entry *pDns;
322
323 if (SockAddr->sa_family != AF_INET)
324 continue;
325
326 InAddr = ((struct sockaddr_in *)SockAddr)->sin_addr;
327
328 /* add dns server to list */
329 pDns = RTMemAllocZ(sizeof(struct dns_entry));
330 if (!pDns)
331 {
332 LogRel(("NAT: Can't allocate buffer for DNS entry\n"));
333 RTMemFree(pAdapterAddr);
334 return VERR_NO_MEMORY;
335 }
336
337 LogRel(("NAT: adding %R[IP4] to DNS server list\n", &InAddr));
338 if ((InAddr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
339 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
340 else
341 pDns->de_addr.s_addr = InAddr.s_addr;
342
343 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
344
345 if (pAddr->DnsSuffix == NULL)
346 continue;
347
348 /* keep the domain list unique */
349 RTUtf16ToUtf8(pAddr->DnsSuffix, &pszSuffix);
350 if (!pszSuffix || strlen(pszSuffix) == 0)
351 {
352 RTStrFree(pszSuffix);
353 continue;
354 }
355
356 found = 0;
357 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
358 {
359 if ( pDomain->dd_pszDomain != NULL
360 && strcmp(pDomain->dd_pszDomain, pszSuffix) == 0)
361 {
362 found = 1;
363 RTStrFree(pszSuffix);
364 break;
365 }
366 }
367 if (!found)
368 {
369 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
370 if (!pDomain)
371 {
372 LogRel(("NAT: not enough memory\n"));
373 RTStrFree(pszSuffix);
374 RTMemFree(pAdapterAddr);
375 return VERR_NO_MEMORY;
376 }
377 pDomain->dd_pszDomain = pszSuffix;
378 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
379 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
380 }
381 }
382 }
383 RTMemFree(pAdapterAddr);
384 return 0;
385}
386
387#else /* !RT_OS_WINDOWS */
388
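/*
 * Editor's note: RTFileGets() below is a small helper that reads one
 * '\n'/'\r'-terminated line from File into pvBuf (NUL-terminated, terminator
 * not stored) and returns VINF_SUCCESS, VERR_EOF at end of file, or the
 * RTFileRead() failure code.  If the buffer fills up before a terminator is
 * seen, the partial line is returned without NUL termination.
 */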
389static int RTFileGets(RTFILE File, void *pvBuf, size_t cbBufSize, size_t *pcbRead)
390{
391 size_t cbRead;
392 char bTest;
393 int rc = VERR_NO_MEMORY;
394 char *pu8Buf = (char *)pvBuf;
395 *pcbRead = 0;
396
397 while ( RT_SUCCESS(rc = RTFileRead(File, &bTest, 1, &cbRead))
398 && (pu8Buf - (char *)pvBuf) < cbBufSize)
399 {
400 if (cbRead == 0)
401 return VERR_EOF;
402
403 if (bTest == '\r' || bTest == '\n')
404 {
405 *pu8Buf = 0;
406 return VINF_SUCCESS;
407 }
408 *pu8Buf = bTest;
409 pu8Buf++;
410 (*pcbRead)++;
411 }
412 return rc;
413}
414
415static int get_dns_addr_domain(PNATState pData, bool fVerbose,
416 struct in_addr *pdns_addr,
417 const char **ppszDomain)
418{
419 char buff[512];
420 char buff2[256];
421 RTFILE f;
422 int cNameserversFound = 0;
423 int fWarnTooManyDnsServers = 0;
424 struct in_addr tmp_addr;
425 int rc = VERR_FILE_NOT_FOUND; /* the OS/2 path may skip the first RTFileOpen */
426 size_t bytes;
427
428# ifdef RT_OS_OS2
429 /* Try various locations. */
430 char *etc = getenv("ETC");
431 if (etc)
432 {
433 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", etc);
434 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
435 }
436 if (RT_FAILURE(rc))
437 {
438 RTStrPrintf(buff, sizeof(buff), "%s/RESOLV2", _PATH_ETC);
439 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
440 }
441 if (RT_FAILURE(rc))
442 {
443 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", _PATH_ETC);
444 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
445 }
446# else /* !RT_OS_OS2 */
447# ifndef DEBUG_vvl
448 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
449# else
450 char *home = getenv("HOME");
451 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", home);
452 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
453 if (RT_SUCCESS(rc))
454 {
455 Log(("NAT: DNS we're using %s\n", buff));
456 }
457 else
458 {
459 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
460 Log(("NAT: DNS we're using %s\n", buff));
461 }
462# endif
463# endif /* !RT_OS_OS2 */
464 if (RT_FAILURE(rc))
465 return -1;
466
467 if (ppszDomain)
468 *ppszDomain = NULL;
469
470 Log(("NAT: DNS Servers:\n"));
471 while ( RT_SUCCESS(rc = RTFileGets(f, buff, sizeof(buff), &bytes))
472 && rc != VERR_EOF)
473 {
474 struct dns_entry *pDns = NULL;
475 if ( cNameserversFound == 4
476 && fWarnTooManyDnsServers == 0
477 && sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1)
478 {
479 fWarnTooManyDnsServers = 1;
480 LogRel(("NAT: too many nameservers registered.\n"));
481 }
482 if ( sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1
483 && cNameserversFound < 4) /* Unix doesn't accept more than 4 name servers*/
484 {
485 if (!inet_aton(buff2, &tmp_addr))
486 continue;
487
488 /* localhost mask */
489 pDns = RTMemAllocZ(sizeof (struct dns_entry));
490 if (!pDns)
491 {
492 LogRel(("can't alloc memory for DNS entry\n"));
493 return -1;
494 }
495
496 /* if the name server is a loopback address, remap it to the NAT alias */
497 pDns->de_addr.s_addr = tmp_addr.s_addr;
498 if ((pDns->de_addr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
499 {
500 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
501 }
502 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
503 cNameserversFound++;
504 }
505 if ((!strncmp(buff, "domain", 6) || !strncmp(buff, "search", 6)))
506 {
507 char *tok;
508 char *saveptr;
509 struct dns_domain_entry *pDomain = NULL;
510 int fFoundDomain = 0;
511 tok = strtok_r(&buff[6], " \t\n", &saveptr);
512 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
513 {
514 if ( tok != NULL
515 && strcmp(tok, pDomain->dd_pszDomain) == 0)
516 {
517 fFoundDomain = 1;
518 break;
519 }
520 }
521 if (tok != NULL && !fFoundDomain)
522 {
523 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
524 if (!pDomain)
525 {
526 LogRel(("NAT: not enought memory to add domain list\n"));
527 return VERR_NO_MEMORY;
528 }
529 pDomain->dd_pszDomain = RTStrDup(tok);
530 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
531 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
532 }
533 }
534 }
535 RTFileClose(f);
536 if (!cNameserversFound)
537 return -1;
538 return 0;
539}
540
541#endif /* !RT_OS_WINDOWS */
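/*
 * Editor's note: the resolv.conf parser above handles the usual syntax, e.g.
 * (hypothetical file contents):
 *
 *     nameserver 192.168.1.1
 *     nameserver 127.0.0.1
 *     search example.org
 *
 * Only the first four "nameserver" entries are used, loopback servers are
 * remapped to the NAT alias address, and the first name on each
 * "domain"/"search" line is added to pData->pDomainList if not already there.
 */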
542
543int slirp_init_dns_list(PNATState pData)
544{
545 TAILQ_INIT(&pData->pDnsList);
546 LIST_INIT(&pData->pDomainList);
547 return get_dns_addr_domain(pData, true, NULL, NULL);
548}
549
550void slirp_release_dns_list(PNATState pData)
551{
552 struct dns_entry *pDns = NULL;
553 struct dns_domain_entry *pDomain = NULL;
554
555 while (!TAILQ_EMPTY(&pData->pDnsList))
556 {
557 pDns = TAILQ_FIRST(&pData->pDnsList);
558 TAILQ_REMOVE(&pData->pDnsList, pDns, de_list);
559 RTMemFree(pDns);
560 }
561
562 while (!LIST_EMPTY(&pData->pDomainList))
563 {
564 pDomain = LIST_FIRST(&pData->pDomainList);
565 LIST_REMOVE(pDomain, dd_list);
566 if (pDomain->dd_pszDomain != NULL)
567 RTStrFree(pDomain->dd_pszDomain);
568 RTMemFree(pDomain);
569 }
570}
571
572int get_dns_addr(PNATState pData, struct in_addr *pdns_addr)
573{
574 return get_dns_addr_domain(pData, false, pdns_addr, NULL);
575}
576
577int slirp_init(PNATState *ppData, uint32_t u32NetAddr, uint32_t u32Netmask,
578 bool fPassDomain, bool fUseHostResolver, int i32AliasMode, void *pvUser)
579{
580 int fNATfailed = 0;
581 int rc;
582 PNATState pData = RTMemAllocZ(sizeof(NATState));
583 *ppData = pData;
584 if (!pData)
585 return VERR_NO_MEMORY;
586 if (u32Netmask & 0x1f)
587 /* CTL is x.x.x.15, bootp passes up to 16 IPs (15..31) */
588 return VERR_INVALID_PARAMETER;
589 pData->fPassDomain = !fUseHostResolver ? fPassDomain : false;
590 pData->fUseHostResolver = fUseHostResolver;
591 pData->pvUser = pvUser;
592 pData->netmask = u32Netmask;
593
594 /* sockets & TCP defaults */
595 pData->socket_rcv = 64 * _1K;
596 pData->socket_snd = 64 * _1K;
597 tcp_sndspace = 64 * _1K;
598 tcp_rcvspace = 64 * _1K;
599
600#ifdef RT_OS_WINDOWS
601 {
602 WSADATA Data;
603 WSAStartup(MAKEWORD(2, 0), &Data);
604 }
605 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
606#endif
607#ifdef VBOX_WITH_SLIRP_MT
608 QSOCKET_LOCK_CREATE(tcb);
609 QSOCKET_LOCK_CREATE(udb);
610 rc = RTReqCreateQueue(&pData->pReqQueue);
611 AssertReleaseRC(rc);
612#endif
613
614 link_up = 1;
615
616 rc = bootp_dhcp_init(pData);
617 if (rc != 0)
618 {
619 LogRel(("NAT: DHCP server initialization was failed\n"));
620 return VINF_NAT_DNS;
621 }
622 debug_init();
623 if_init(pData);
624 ip_init(pData);
625 icmp_init(pData);
626
627 /* Initialise mbufs *after* setting the MTU */
628 mbuf_init(pData);
629
630 pData->special_addr.s_addr = u32NetAddr;
631 pData->slirp_ethaddr = &special_ethaddr[0];
632 alias_addr.s_addr = pData->special_addr.s_addr | RT_H2N_U32_C(CTL_ALIAS);
633 /* @todo add the ability to configure this stuff */
634
635 /* set default addresses */
636 inet_aton("127.0.0.1", &loopback_addr);
637 if (!pData->fUseHostResolver)
638 {
639 if (slirp_init_dns_list(pData) < 0)
640 fNATfailed = 1;
641
642 dnsproxy_init(pData);
643 }
644 if (i32AliasMode & ~(PKT_ALIAS_LOG|PKT_ALIAS_SAME_PORTS|PKT_ALIAS_PROXY_ONLY))
645 {
646 LogRel(("NAT: alias mode %x is ignored\n", i32AliasMode));
647 i32AliasMode = 0;
648 }
649 pData->i32AliasMode = i32AliasMode;
650 getouraddr(pData);
651 {
652 int flags = 0;
653 struct in_addr proxy_addr;
654 pData->proxy_alias = LibAliasInit(pData, NULL);
655 if (pData->proxy_alias == NULL)
656 {
657 LogRel(("NAT: LibAlias default rule wasn't initialized\n"));
658 AssertMsgFailed(("NAT: LibAlias default rule wasn't initialized\n"));
659 }
660 flags = LibAliasSetMode(pData->proxy_alias, 0, 0);
661#ifndef NO_FW_PUNCH
662 flags |= PKT_ALIAS_PUNCH_FW;
663#endif
664 flags |= pData->i32AliasMode; /* do transparent proxying */
665 flags = LibAliasSetMode(pData->proxy_alias, flags, ~0);
666 proxy_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
667 LibAliasSetAddress(pData->proxy_alias, proxy_addr);
668 ftp_alias_load(pData);
669 nbt_alias_load(pData);
670 if (pData->fUseHostResolver)
671 dns_alias_load(pData);
672 }
673 return fNATfailed ? VINF_NAT_DNS : VINF_SUCCESS;
674}
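/*
 * Editor's note: slirp_init() returns VINF_SUCCESS on success, VINF_NAT_DNS
 * when the built-in DHCP server failed to initialize or the host DNS
 * configuration could not be read, and a VERR_* status (VERR_NO_MEMORY,
 * VERR_INVALID_PARAMETER) on fatal errors.
 */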
675
676/**
677 * Register statistics.
678 */
679void slirp_register_statistics(PNATState pData, PPDMDRVINS pDrvIns)
680{
681#ifdef VBOX_WITH_STATISTICS
682# define PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
683# define COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
684# include "counters.h"
685# undef COUNTER
686/** @todo register statistics for the variables dumped by:
687 * ipstats(pData); tcpstats(pData); udpstats(pData); icmpstats(pData);
688 * mbufstats(pData); sockstats(pData); */
689#endif /* VBOX_WITH_STATISTICS */
690}
691
692/**
693 * Deregister statistics.
694 */
695void slirp_deregister_statistics(PNATState pData, PPDMDRVINS pDrvIns)
696{
697 if (pData == NULL)
698 return;
699#ifdef VBOX_WITH_STATISTICS
700# define PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
701# define COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
702# include "counters.h"
703#endif /* VBOX_WITH_STATISTICS */
704}
705
706/**
707 * Marks the link as up, making it possible to establish new connections.
708 */
709void slirp_link_up(PNATState pData)
710{
711 struct arp_cache_entry *ac;
712 link_up = 1;
713
714 if (LIST_EMPTY(&pData->arp_cache))
715 return;
716
717 LIST_FOREACH(ac, &pData->arp_cache, list)
718 {
719 activate_port_forwarding(pData, ac->ether);
720 }
721}
722
723/**
724 * Marks the link as down and cleans up the current connections.
725 */
726void slirp_link_down(PNATState pData)
727{
728 struct socket *so;
729 struct port_forward_rule *rule;
730
731 while ((so = tcb.so_next) != &tcb)
732 {
733 if (so->so_state & SS_NOFDREF || so->s == -1)
734 sofree(pData, so);
735 else
736 tcp_drop(pData, sototcpcb(so), 0);
737 }
738
739 while ((so = udb.so_next) != &udb)
740 udp_detach(pData, so);
741
742 /*
743 * Clear the active state of port-forwarding rules to force
744 * re-setup on restoration of communications.
745 */
746 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
747 {
748 rule->activated = 0;
749 }
750 pData->cRedirectionsActive = 0;
751
752 link_up = 0;
753}
754
755/**
756 * Terminates the slirp component.
757 */
758void slirp_term(PNATState pData)
759{
760 if (pData == NULL)
761 return;
762#ifdef RT_OS_WINDOWS
763 pData->pfIcmpCloseHandle(pData->icmp_socket.sh);
764 FreeLibrary(pData->hmIcmpLibrary);
765 RTMemFree(pData->pvIcmpBuffer);
766#else
767 closesocket(pData->icmp_socket.s);
768#endif
769
770 slirp_link_down(pData);
771 slirp_release_dns_list(pData);
772 ftp_alias_unload(pData);
773 nbt_alias_unload(pData);
774 if (pData->fUseHostResolver)
775 dns_alias_unload(pData);
776 while (!LIST_EMPTY(&instancehead))
777 {
778 struct libalias *la = LIST_FIRST(&instancehead);
779 /* libalias do all clean up */
780 LibAliasUninit(la);
781 }
782 while (!LIST_EMPTY(&pData->arp_cache))
783 {
784 struct arp_cache_entry *ac = LIST_FIRST(&pData->arp_cache);
785 LIST_REMOVE(ac, list);
786 RTMemFree(ac);
787 }
788 bootp_dhcp_fini(pData);
789 m_fini(pData);
790#ifdef RT_OS_WINDOWS
791 WSACleanup();
792#endif
793#ifdef LOG_ENABLED
794 Log(("\n"
795 "NAT statistics\n"
796 "--------------\n"
797 "\n"));
798 ipstats(pData);
799 tcpstats(pData);
800 udpstats(pData);
801 icmpstats(pData);
802 mbufstats(pData);
803 sockstats(pData);
804 Log(("\n"
805 "\n"
806 "\n"));
807#endif
808 RTMemFree(pData);
809}
810
811
812#define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
813#define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
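/*
 * Editor's note: the two macros above are true only for sockets that are
 * fully connected (SS_ISFCONNECTED) and haven't been shut down in the
 * respective direction (SS_FCANTSENDMORE / SS_FCANTRCVMORE not set).
 */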
814
815/*
816 * curtime kept to an accuracy of 1ms
817 */
818static void updtime(PNATState pData)
819{
820#ifdef RT_OS_WINDOWS
821 struct _timeb tb;
822
823 _ftime(&tb);
824 curtime = (u_int)tb.time * (u_int)1000;
825 curtime += (u_int)tb.millitm;
826#else
827 gettimeofday(&tt, 0);
828
829 curtime = (u_int)tt.tv_sec * (u_int)1000;
830 curtime += (u_int)tt.tv_usec / (u_int)1000;
831
832 if ((tt.tv_usec % 1000) >= 500)
833 curtime++;
834#endif
835}
836
837#ifdef RT_OS_WINDOWS
838void slirp_select_fill(PNATState pData, int *pnfds)
839#else /* RT_OS_WINDOWS */
840void slirp_select_fill(PNATState pData, int *pnfds, struct pollfd *polls)
841#endif /* !RT_OS_WINDOWS */
842{
843 struct socket *so, *so_next;
844 int nfds;
845#if defined(RT_OS_WINDOWS)
846 int rc;
847 int error;
848#else
849 int poll_index = 0;
850#endif
851 int i;
852
853 STAM_PROFILE_START(&pData->StatFill, a);
854
855 nfds = *pnfds;
856
857 /*
858 * First, TCP sockets
859 */
860 do_slowtimo = 0;
861 if (!link_up)
862 goto done;
863
864 /*
865 * *_slowtimo needs calling if there are IP fragments
866 * in the fragment queue, or there are TCP connections active
867 */
868 /* XXX:
869 * triggering of fragment expiration should be the same but use the new macros
870 */
871 do_slowtimo = (tcb.so_next != &tcb);
872 if (!do_slowtimo)
873 {
874 for (i = 0; i < IPREASS_NHASH; i++)
875 {
876 if (!TAILQ_EMPTY(&ipq[i]))
877 {
878 do_slowtimo = 1;
879 break;
880 }
881 }
882 }
883 /* always add the ICMP socket */
884#ifndef RT_OS_WINDOWS
885 pData->icmp_socket.so_poll_index = -1;
886#endif
887 ICMP_ENGAGE_EVENT(&pData->icmp_socket, readfds);
888
889 STAM_COUNTER_RESET(&pData->StatTCP);
890 STAM_COUNTER_RESET(&pData->StatTCPHot);
891
892 QSOCKET_FOREACH(so, so_next, tcp)
893 /* { */
894#if !defined(RT_OS_WINDOWS)
895 so->so_poll_index = -1;
896#endif
897 STAM_COUNTER_INC(&pData->StatTCP);
898
899 /*
900 * See if we need a tcp_fasttimo
901 */
902 if ( time_fasttimo == 0
903 && so->so_tcpcb != NULL
904 && so->so_tcpcb->t_flags & TF_DELACK)
905 {
906 time_fasttimo = curtime; /* Flag when we want a fasttimo */
907 }
908
909 /*
910 * NOFDREF can include still connecting to local-host,
911 * newly socreated() sockets etc. Don't want to select these.
912 */
913 if (so->so_state & SS_NOFDREF || so->s == -1)
914 CONTINUE(tcp);
915
916 /*
917 * Set for reading sockets which are accepting
918 */
919 if (so->so_state & SS_FACCEPTCONN)
920 {
921 STAM_COUNTER_INC(&pData->StatTCPHot);
922 TCP_ENGAGE_EVENT1(so, readfds);
923 CONTINUE(tcp);
924 }
925
926 /*
927 * Set for writing sockets which are connecting
928 */
929 if (so->so_state & SS_ISFCONNECTING)
930 {
931 Log2(("connecting %R[natsock] engaged\n",so));
932 STAM_COUNTER_INC(&pData->StatTCPHot);
933 TCP_ENGAGE_EVENT1(so, writefds);
934 }
935
936 /*
937 * Set for writing if we are connected, can send more, and
938 * we have something to send
939 */
940 if (CONN_CANFSEND(so) && so->so_rcv.sb_cc)
941 {
942 STAM_COUNTER_INC(&pData->StatTCPHot);
943 TCP_ENGAGE_EVENT1(so, writefds);
944 }
945
946 /*
947 * Set for reading (and urgent data) if we are connected, can
948 * receive more, and we have room for it XXX /2 ?
949 */
950 if (CONN_CANFRCV(so) && (so->so_snd.sb_cc < (so->so_snd.sb_datalen/2)))
951 {
952 STAM_COUNTER_INC(&pData->StatTCPHot);
953 TCP_ENGAGE_EVENT2(so, readfds, xfds);
954 }
955 LOOP_LABEL(tcp, so, so_next);
956 }
957
958 /*
959 * UDP sockets
960 */
961 STAM_COUNTER_RESET(&pData->StatUDP);
962 STAM_COUNTER_RESET(&pData->StatUDPHot);
963
964 QSOCKET_FOREACH(so, so_next, udp)
965 /* { */
966
967 STAM_COUNTER_INC(&pData->StatUDP);
968#if !defined(RT_OS_WINDOWS)
969 so->so_poll_index = -1;
970#endif
971
972 /*
973 * See if it's timed out
974 */
975 if (so->so_expire)
976 {
977 if (so->so_expire <= curtime)
978 {
979 Log2(("NAT: %R[natsock] expired\n", so));
980 if (so->so_timeout != NULL)
981 {
982 so->so_timeout(pData, so, so->so_timeout_arg);
983 }
984#ifdef VBOX_WITH_SLIRP_MT
985 /* we need so_next to continue our loop */
986 so_next = so->so_next;
987#endif
988 UDP_DETACH(pData, so, so_next);
989 CONTINUE_NO_UNLOCK(udp);
990 }
991 else
992 {
993 do_slowtimo = 1; /* Let socket expire */
994 }
995 }
996
997 /*
998 * When UDP packets are received from over the link, they're
999 * sendto()'d straight away, so no need for setting for writing
1000 * Limit the number of packets queued by this session to 4.
1001 * Note that even though we try and limit this to 4 packets,
1002 * the session could have more queued if the packets needed
1003 * to be fragmented.
1004 *
1005 * (XXX <= 4 ?)
1006 */
1007 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4)
1008 {
1009 STAM_COUNTER_INC(&pData->StatUDPHot);
1010 UDP_ENGAGE_EVENT(so, readfds);
1011 }
1012 LOOP_LABEL(udp, so, so_next);
1013 }
1014done:
1015
1016#if defined(RT_OS_WINDOWS)
1017 *pnfds = VBOX_EVENT_COUNT;
1018#else /* RT_OS_WINDOWS */
1019 AssertRelease(poll_index <= *pnfds);
1020 *pnfds = poll_index;
1021#endif /* !RT_OS_WINDOWS */
1022
1023 STAM_PROFILE_STOP(&pData->StatFill, a);
1024}
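/*
 * Editor's note: on return *pnfds holds VBOX_EVENT_COUNT on Windows (the
 * caller waits on the event handle array instead of a poll set) and the
 * number of pollfd entries actually filled in on other hosts.
 */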
1025
1026#if defined(RT_OS_WINDOWS)
1027void slirp_select_poll(PNATState pData, int fTimeout, int fIcmp)
1028#else /* RT_OS_WINDOWS */
1029void slirp_select_poll(PNATState pData, struct pollfd *polls, int ndfs)
1030#endif /* !RT_OS_WINDOWS */
1031{
1032 struct socket *so, *so_next;
1033 int ret;
1034#if defined(RT_OS_WINDOWS)
1035 WSANETWORKEVENTS NetworkEvents;
1036 int rc;
1037 int error;
1038#else
1039 int poll_index = 0;
1040#endif
1041
1042 STAM_PROFILE_START(&pData->StatPoll, a);
1043
1044 /* Update time */
1045 updtime(pData);
1046
1047 /*
1048 * See if anything has timed out
1049 */
1050 if (link_up)
1051 {
1052 if (time_fasttimo && ((curtime - time_fasttimo) >= 2))
1053 {
1054 STAM_PROFILE_START(&pData->StatFastTimer, b);
1055 tcp_fasttimo(pData);
1056 time_fasttimo = 0;
1057 STAM_PROFILE_STOP(&pData->StatFastTimer, b);
1058 }
1059 if (do_slowtimo && ((curtime - last_slowtimo) >= 499))
1060 {
1061 STAM_PROFILE_START(&pData->StatSlowTimer, c);
1062 ip_slowtimo(pData);
1063 tcp_slowtimo(pData);
1064 last_slowtimo = curtime;
1065 STAM_PROFILE_STOP(&pData->StatSlowTimer, c);
1066 }
1067 }
1068#if defined(RT_OS_WINDOWS)
1069 if (fTimeout)
1070 return; /* only timer update */
1071#endif
1072
1073 /*
1074 * Check sockets
1075 */
1076 if (!link_up)
1077 goto done;
1078#if defined(RT_OS_WINDOWS)
1079 /* XXX: before renaming, see the fIcmp define
1080 * in slirp_state.h
1081 */
1082 if (fIcmp)
1083 sorecvfrom(pData, &pData->icmp_socket);
1084#else
1085 if ( (pData->icmp_socket.s != -1)
1086 && CHECK_FD_SET(&pData->icmp_socket, ignored, readfds))
1087 sorecvfrom(pData, &pData->icmp_socket);
1088#endif
1089 /*
1090 * Check TCP sockets
1091 */
1092 QSOCKET_FOREACH(so, so_next, tcp)
1093 /* { */
1094
1095#ifdef VBOX_WITH_SLIRP_MT
1096 if ( so->so_state & SS_NOFDREF
1097 && so->so_deleted == 1)
1098 {
1099 struct socket *son, *sop = NULL;
1100 QSOCKET_LOCK(tcb);
1101 if (so->so_next != NULL)
1102 {
1103 if (so->so_next != &tcb)
1104 SOCKET_LOCK(so->so_next);
1105 son = so->so_next;
1106 }
1107 if ( so->so_prev != &tcb
1108 && so->so_prev != NULL)
1109 {
1110 SOCKET_LOCK(so->so_prev);
1111 sop = so->so_prev;
1112 }
1113 QSOCKET_UNLOCK(tcb);
1114 remque(pData, so);
1115 NSOCK_DEC();
1116 SOCKET_UNLOCK(so);
1117 SOCKET_LOCK_DESTROY(so);
1118 RTMemFree(so);
1119 so_next = son;
1120 if (sop != NULL)
1121 SOCKET_UNLOCK(sop);
1122 CONTINUE_NO_UNLOCK(tcp);
1123 }
1124#endif
1125 /*
1126 * FD_ISSET is meaningless on these sockets
1127 * (and they can crash the program)
1128 */
1129 if (so->so_state & SS_NOFDREF || so->s == -1)
1130 CONTINUE(tcp);
1131
1132 POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);
1133
1134 LOG_NAT_SOCK(so, TCP, &NetworkEvents, readfds, writefds, xfds);
1135
1136
1137 /*
1138 * Check for URG data
1139 * This will soread as well, so no need to
1140 * test for readfds below if this succeeds
1141 */
1142
1143 /* out-of-band data */
1144 if ( CHECK_FD_SET(so, NetworkEvents, xfds)
1145#ifdef RT_OS_DARWIN
1146 /* Darwin (and probably other BSD hosts) generates a POLLPRI|POLLHUP event on receiving TCP flags ACK|URG|FIN; this
1147 * combination doesn't reach this branch on other Unix hosts
1148 */
1149 && !CHECK_FD_SET(so, NetworkEvents, closefds)
1150#endif
1151 )
1152 {
1153 sorecvoob(pData, so);
1154 }
1155
1156 /*
1157 * Check sockets for reading
1158 */
1159 else if ( CHECK_FD_SET(so, NetworkEvents, readfds)
1160 || WIN_CHECK_FD_SET(so, NetworkEvents, acceptds))
1161 {
1162 /*
1163 * Check for incoming connections
1164 */
1165 if (so->so_state & SS_FACCEPTCONN)
1166 {
1167 TCP_CONNECT(pData, so);
1168 if (!CHECK_FD_SET(so, NetworkEvents, closefds))
1169 CONTINUE(tcp);
1170 }
1171
1172 ret = soread(pData, so);
1173 /* Output it if we read something */
1174 if (RT_LIKELY(ret > 0))
1175 TCP_OUTPUT(pData, sototcpcb(so));
1176 }
1177
1178 /*
1179 * Check for FD_CLOSE events.
1180 * In some cases, once FD_CLOSE has been signalled on a socket, the event may only be flushed later (for various reasons).
1181 */
1182 if ( CHECK_FD_SET(so, NetworkEvents, closefds)
1183 || (so->so_close == 1))
1184 {
1185 /*
1186 * drain the socket
1187 */
1188 for (;;)
1189 {
1190 ret = soread(pData, so);
1191 if (ret > 0)
1192 TCP_OUTPUT(pData, sototcpcb(so));
1193 else
1194 {
1195 Log2(("%R[natsock] errno %d:%s\n", so, errno, strerror(errno)));
1196 break;
1197 }
1198 }
1199 /* mark the socket for termination _after_ it was drained */
1200 so->so_close = 1;
1201 CONTINUE(tcp);
1202 }
1203
1204 /*
1205 * Check sockets for writing
1206 */
1207 if (CHECK_FD_SET(so, NetworkEvents, writefds))
1208 {
1209 /*
1210 * Check for non-blocking, still-connecting sockets
1211 */
1212 if (so->so_state & SS_ISFCONNECTING)
1213 {
1214 Log2(("connecting %R[natsock] catched\n", so));
1215 /* Connected */
1216 so->so_state &= ~SS_ISFCONNECTING;
1217
1218 /*
1219 * This should be probably guarded by PROBE_CONN too. Anyway,
1220 * we disable it on OS/2 because the below send call returns
1221 * EFAULT which causes the opened TCP socket to close right
1222 * after it has been opened and connected.
1223 */
1224#ifndef RT_OS_OS2
1225 ret = send(so->s, (const char *)&ret, 0, 0);
1226 if (ret < 0)
1227 {
1228 /* XXXXX Must fix, zero bytes is a NOP */
1229 if ( errno == EAGAIN
1230 || errno == EWOULDBLOCK
1231 || errno == EINPROGRESS
1232 || errno == ENOTCONN)
1233 CONTINUE(tcp);
1234
1235 /* else failed */
1236 so->so_state = SS_NOFDREF;
1237 }
1238 /* else so->so_state &= ~SS_ISFCONNECTING; */
1239#endif
1240
1241 /*
1242 * Continue tcp_input
1243 */
1244 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1245 /* continue; */
1246 }
1247 else
1248 SOWRITE(ret, pData, so);
1249 /*
1250 * XXX If we wrote something (a lot), there could be the need
1251 * for a window update. In the worst case, the remote will send
1252 * a window probe to get things going again.
1253 */
1254 }
1255
1256 /*
1257 * Probe a still-connecting, non-blocking socket
1258 * to check if it's still alive
1259 */
1260#ifdef PROBE_CONN
1261 if (so->so_state & SS_ISFCONNECTING)
1262 {
1263 ret = recv(so->s, (char *)&ret, 0, 0);
1264
1265 if (ret < 0)
1266 {
1267 /* XXX */
1268 if ( errno == EAGAIN
1269 || errno == EWOULDBLOCK
1270 || errno == EINPROGRESS
1271 || errno == ENOTCONN)
1272 {
1273 CONTINUE(tcp); /* Still connecting, continue */
1274 }
1275
1276 /* else failed */
1277 so->so_state = SS_NOFDREF;
1278
1279 /* tcp_input will take care of it */
1280 }
1281 else
1282 {
1283 ret = send(so->s, &ret, 0, 0);
1284 if (ret < 0)
1285 {
1286 /* XXX */
1287 if ( errno == EAGAIN
1288 || errno == EWOULDBLOCK
1289 || errno == EINPROGRESS
1290 || errno == ENOTCONN)
1291 {
1292 CONTINUE(tcp);
1293 }
1294 /* else failed */
1295 so->so_state = SS_NOFDREF;
1296 }
1297 else
1298 so->so_state &= ~SS_ISFCONNECTING;
1299
1300 }
1301 TCP_INPUT((struct mbuf *)NULL, sizeof(struct ip),so);
1302 } /* SS_ISFCONNECTING */
1303#endif
1304 LOOP_LABEL(tcp, so, so_next);
1305 }
1306
1307 /*
1308 * Now UDP sockets.
1309 * Incoming packets are sent straight away, they're not buffered.
1310 * Incoming UDP data isn't buffered either.
1311 */
1312 QSOCKET_FOREACH(so, so_next, udp)
1313 /* { */
1314#ifdef VBOX_WITH_SLIRP_MT
1315 if ( so->so_state & SS_NOFDREF
1316 && so->so_deleted == 1)
1317 {
1318 struct socket *son, *sop = NULL;
1319 QSOCKET_LOCK(udb);
1320 if (so->so_next != NULL)
1321 {
1322 if (so->so_next != &udb)
1323 SOCKET_LOCK(so->so_next);
1324 son = so->so_next;
1325 }
1326 if ( so->so_prev != &udb
1327 && so->so_prev != NULL)
1328 {
1329 SOCKET_LOCK(so->so_prev);
1330 sop = so->so_prev;
1331 }
1332 QSOCKET_UNLOCK(udb);
1333 remque(pData, so);
1334 NSOCK_DEC();
1335 SOCKET_UNLOCK(so);
1336 SOCKET_LOCK_DESTROY(so);
1337 RTMemFree(so);
1338 so_next = son;
1339 if (sop != NULL)
1340 SOCKET_UNLOCK(sop);
1341 CONTINUE_NO_UNLOCK(udp);
1342 }
1343#endif
1344 POLL_UDP_EVENTS(rc, error, so, &NetworkEvents);
1345
1346 LOG_NAT_SOCK(so, UDP, &NetworkEvents, readfds, writefds, xfds);
1347
1348 if (so->s != -1 && CHECK_FD_SET(so, NetworkEvents, readfds))
1349 {
1350 SORECVFROM(pData, so);
1351 }
1352 LOOP_LABEL(udp, so, so_next);
1353 }
1354
1355done:
1356
1357 STAM_PROFILE_STOP(&pData->StatPoll, a);
1358}
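/*
 * Editor's note: slirp_select_poll() is the companion of slirp_select_fill();
 * it first drives the TCP fast/slow timers and then dispatches the pending
 * read/write/OOB/close events to the matching TCP and UDP sockets.
 */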
1359
1360
1361struct arphdr
1362{
1363 unsigned short ar_hrd; /* format of hardware address */
1364 unsigned short ar_pro; /* format of protocol address */
1365 unsigned char ar_hln; /* length of hardware address */
1366 unsigned char ar_pln; /* length of protocol address */
1367 unsigned short ar_op; /* ARP opcode (command) */
1368
1369 /*
1370 * Ethernet looks like this : This bit is variable sized however...
1371 */
1372 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
1373 unsigned char ar_sip[4]; /* sender IP address */
1374 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
1375 unsigned char ar_tip[4]; /* target IP address */
1376};
1377AssertCompileSize(struct arphdr, 28);
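/* Editor's note: this is the fixed IPv4-over-Ethernet ARP layout (28 bytes),
 * the only variant the NAT engine ever sends or parses. */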
1378
1379static void arp_input(PNATState pData, struct mbuf *m)
1380{
1381 struct ethhdr *eh;
1382 struct ethhdr *reh;
1383 struct arphdr *ah;
1384 struct arphdr *rah;
1385 int ar_op;
1386 uint32_t htip;
1387 uint32_t tip;
1388 struct mbuf *mr;
1389 eh = mtod(m, struct ethhdr *);
1390 ah = (struct arphdr *)&eh[1];
1391 htip = RT_N2H_U32(*(uint32_t*)ah->ar_tip);
1392 tip = *(uint32_t*)ah->ar_tip;
1393
1394 ar_op = RT_N2H_U16(ah->ar_op);
1395
1396 switch (ar_op)
1397 {
1398 case ARPOP_REQUEST:
1399 mr = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1400 if (mr == NULL)
1401 return;
1402 reh = mtod(mr, struct ethhdr *);
1403 mr->m_data += ETH_HLEN;
1404 rah = mtod(mr, struct arphdr *);
1405 mr->m_len = sizeof(struct arphdr);
1406 Assert(mr);
1407 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1408#ifdef VBOX_WITH_NAT_SERVICE
1409 if (tip == pData->special_addr.s_addr)
1410 goto arp_ok;
1411#endif
1412 if ((htip & pData->netmask) == RT_N2H_U32(pData->special_addr.s_addr))
1413 {
1414 if ( CTL_CHECK(htip, CTL_DNS)
1415 || CTL_CHECK(htip, CTL_ALIAS)
1416 || CTL_CHECK(htip, CTL_TFTP))
1417 goto arp_ok;
1418 m_freem(pData, m);
1419 m_freem(pData, mr);
1420 return;
1421
1422 arp_ok:
1423 rah->ar_hrd = RT_H2N_U16_C(1);
1424 rah->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1425 rah->ar_hln = ETH_ALEN;
1426 rah->ar_pln = 4;
1427 rah->ar_op = RT_H2N_U16_C(ARPOP_REPLY);
1428 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN);
1429
1430 switch (htip & ~pData->netmask)
1431 {
1432 case CTL_DNS:
1433 case CTL_ALIAS:
1434 rah->ar_sha[5] = (uint8_t)(htip & ~pData->netmask);
1435 break;
1436 default:;
1437 }
1438
1439 memcpy(rah->ar_sip, ah->ar_tip, 4);
1440 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
1441 memcpy(rah->ar_tip, ah->ar_sip, 4);
1442 if_encap(pData, ETH_P_ARP, mr, ETH_ENCAP_URG);
1443 m_freem(pData, m);
1444 }
1445 /* Gratuitous ARP */
1446 if ( *(uint32_t *)ah->ar_sip == *(uint32_t *)ah->ar_tip
1447 && memcmp(ah->ar_tha, broadcast_ethaddr, ETH_ALEN) == 0
1448 && memcmp(eh->h_dest, broadcast_ethaddr, ETH_ALEN) == 0)
1449 {
1450 /* we've received an announcement of an address assignment,
1451 * so update the ARP cache
1452 */
1453 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]) == 0)
1454 {
1455 m_freem(pData, mr);
1456 m_freem(pData, m);
1457 break;
1458 }
1459 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]);
1460 }
1461 break;
1462
1463 case ARPOP_REPLY:
1464 if (slirp_arp_cache_update(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]) == 0)
1465 {
1466 m_freem(pData, m);
1467 break;
1468 }
1469 slirp_arp_cache_add(pData, *(uint32_t *)ah->ar_sip, ah->ar_sha);
1470 m_freem(pData, m);
1471 break;
1472
1473 default:
1474 break;
1475 }
1476}
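/*
 * Editor's note: arp_input() answers ARP requests for the NAT service
 * addresses (CTL_ALIAS, CTL_DNS, CTL_TFTP) using the 52:54:00:12:35:xx MAC
 * prefix and learns guest MAC/IP pairs from gratuitous ARP announcements and
 * ARP replies.
 */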
1477
1478/**
1479 * Feed a packet into the slirp engine.
1480 *
1481 * @param m Data buffer, m_len is not valid.
1482 * @param cbBuf The length of the data in m.
1483 */
1484void slirp_input(PNATState pData, struct mbuf *m, size_t cbBuf)
1485{
1486 int proto;
1487 static bool fWarnedIpv6;
1488 struct ethhdr *eh;
1489 uint8_t au8Ether[ETH_ALEN];
1490
1491 m->m_len = cbBuf;
1492 if (cbBuf < ETH_HLEN)
1493 {
1494 LogRel(("NAT: packet having size %d has been ignored\n", m->m_len));
1495 m_freem(pData, m);
1496 return;
1497 }
1498 eh = mtod(m, struct ethhdr *);
1499 proto = RT_N2H_U16(eh->h_proto);
1500
1501 memcpy(au8Ether, eh->h_source, ETH_ALEN);
1502
1503 switch(proto)
1504 {
1505 case ETH_P_ARP:
1506 arp_input(pData, m);
1507 break;
1508
1509 case ETH_P_IP:
1510 /* Update time. Important if the network is very quiet, as otherwise
1511 * the first outgoing connection gets an incorrect timestamp. */
1512 updtime(pData);
1513 m_adj(m, ETH_HLEN);
1514 M_ASSERTPKTHDR(m);
1515 m->m_pkthdr.header = mtod(m, void *);
1516 ip_input(pData, m);
1517 break;
1518
1519 case ETH_P_IPV6:
1520 m_freem(pData, m);
1521 if (!fWarnedIpv6)
1522 {
1523 LogRel(("NAT: IPv6 not supported\n"));
1524 fWarnedIpv6 = true;
1525 }
1526 break;
1527
1528 default:
1529 Log(("NAT: Unsupported protocol %x\n", proto));
1530 m_freem(pData, m);
1531 break;
1532 }
1533
1534 if (pData->cRedirectionsActive != pData->cRedirectionsStored)
1535 activate_port_forwarding(pData, au8Ether);
1536}
1537
1538/* output the IP packet to the ethernet device */
1539void if_encap(PNATState pData, uint16_t eth_proto, struct mbuf *m, int flags)
1540{
1541 struct ethhdr *eh;
1542 uint8_t *buf = NULL;
1543 size_t mlen = 0;
1544 STAM_PROFILE_START(&pData->StatIF_encap, a);
1545
1546 M_ASSERTPKTHDR(m);
1547 m->m_data -= ETH_HLEN;
1548 m->m_len += ETH_HLEN;
1549 eh = mtod(m, struct ethhdr *);
1550
1551 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) != 0)
1552 {
1553 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
1554 memcpy(eh->h_source, special_ethaddr, ETH_ALEN);
1555 Assert(memcmp(eh->h_dest, special_ethaddr, ETH_ALEN) != 0);
1556 if (memcmp(eh->h_dest, zerro_ethaddr, ETH_ALEN) == 0)
1557 {
1558 /* don't do anything */
1559 m_freem(pData, m);
1560 goto done;
1561 }
1562 }
1563 mlen = m_length(m, NULL);
1564 buf = RTMemAlloc(mlen);
1565 if (buf == NULL)
1566 {
1567 LogRel(("NAT: Can't alloc memory for outgoing buffer\n"));
1568 m_freem(pData, m);
1569 goto done;
1570 }
1571 eh->h_proto = RT_H2N_U16(eth_proto);
1572 m_copydata(m, 0, mlen, (char *)buf);
1573 if (flags & ETH_ENCAP_URG)
1574 slirp_urg_output(pData->pvUser, m, buf, mlen);
1575 else
1576 slirp_output(pData->pvUser, m, buf, mlen);
1577done:
1578 STAM_PROFILE_STOP(&pData->StatIF_encap, a);
1579}
1580
1581/**
1582 * We still use the DHCP server leases to map an Ethernet address to an IP address.
1583 * @todo see rt_lookup_in_cache
1584 */
1585static uint32_t find_guest_ip(PNATState pData, const uint8_t *eth_addr)
1586{
1587 uint32_t ip = INADDR_ANY;
1588 int rc;
1589
1590 if (eth_addr == NULL)
1591 return INADDR_ANY;
1592
1593 if ( memcmp(eth_addr, zerro_ethaddr, ETH_ALEN) == 0
1594 || memcmp(eth_addr, broadcast_ethaddr, ETH_ALEN) == 0)
1595 return INADDR_ANY;
1596
1597 rc = slirp_arp_lookup_ip_by_ether(pData, eth_addr, &ip);
1598 if (RT_SUCCESS(rc))
1599 return ip;
1600
1601 bootp_cache_lookup_ip_by_ether(pData, eth_addr, &ip);
1602 /* ignore return code, ip will be set to INADDR_ANY on error */
1603 return ip;
1604}
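/*
 * Editor's note: find_guest_ip() consults the ARP cache first and falls back
 * to the DHCP (bootp) lease cache; INADDR_ANY is returned when the guest's
 * address isn't known yet.
 */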
1605
1606/**
1607 * We need to check whether port forwarding has been activated
1608 * for a specific machine; that of course relates to
1609 * service mode.
1610 * @todo finish this for service case
1611 */
1612static void activate_port_forwarding(PNATState pData, const uint8_t *h_source)
1613{
1614 struct port_forward_rule *rule, *tmp;
1615
1616 /* check mac here */
1617 LIST_FOREACH_SAFE(rule, &pData->port_forward_rule_head, list, tmp)
1618 {
1619 struct socket *so;
1620 struct alias_link *alias_link;
1621 struct libalias *lib;
1622 int flags;
1623 struct sockaddr sa;
1624 struct sockaddr_in *psin;
1625 socklen_t socketlen;
1626 struct in_addr alias;
1627 int rc;
1628 uint32_t guest_addr; /* need to understand if we already give address to guest */
1629
1630 if (rule->activated)
1631 continue;
1632
1633#ifdef VBOX_WITH_NAT_SERVICE
1634 if (memcmp(rule->mac_address, h_source, ETH_ALEN) != 0)
1635 continue; /* not the right MAC; @todo it'd be better to keep the port forwarding list per MAC */
1636 guest_addr = find_guest_ip(pData, h_source);
1637#else
1638#if 0
1639 if (memcmp(client_ethaddr, h_source, ETH_ALEN) != 0)
1640 continue;
1641#endif
1642 guest_addr = find_guest_ip(pData, h_source);
1643#endif
1644 if (guest_addr == INADDR_ANY)
1645 {
1646 /* the address wasn't granted */
1647 return;
1648 }
1649
1650#if !defined(VBOX_WITH_NAT_SERVICE)
1651 if (rule->guest_addr.s_addr != guest_addr)
1652 continue;
1653#endif
1654
1655 LogRel(("NAT: set redirect %s host port %d => guest port %d @ %R[IP4]\n",
1656 (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1657 rule->host_port, rule->guest_port, &guest_addr));
1658
1659 if (rule->proto == IPPROTO_UDP)
1660 so = udp_listen(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1661 RT_H2N_U16(rule->guest_port), 0);
1662 else
1663 so = solisten(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1664 RT_H2N_U16(rule->guest_port), 0);
1665
1666 if (so == NULL)
1667 goto remove_port_forwarding;
1668
1669 psin = (struct sockaddr_in *)&sa;
1670 psin->sin_family = AF_INET;
1671 psin->sin_port = 0;
1672 psin->sin_addr.s_addr = INADDR_ANY;
1673 socketlen = sizeof(struct sockaddr);
1674
1675 rc = getsockname(so->s, &sa, &socketlen);
1676 if (rc < 0 || sa.sa_family != AF_INET)
1677 goto remove_port_forwarding;
1678
1679 psin = (struct sockaddr_in *)&sa;
1680
1681 lib = LibAliasInit(pData, NULL);
1682 flags = LibAliasSetMode(lib, 0, 0);
1683 flags |= pData->i32AliasMode;
1684 flags |= PKT_ALIAS_REVERSE; /* set reverse */
1685 flags = LibAliasSetMode(lib, flags, ~0);
1686
1687 alias.s_addr = RT_H2N_U32(RT_N2H_U32(guest_addr) | CTL_ALIAS);
1688 alias_link = LibAliasRedirectPort(lib, psin->sin_addr, RT_H2N_U16(rule->host_port),
1689 alias, RT_H2N_U16(rule->guest_port),
1690 pData->special_addr, -1, /* not very clear for now */
1691 rule->proto);
1692 if (!alias_link)
1693 goto remove_port_forwarding;
1694
1695 so->so_la = lib;
1696 rule->activated = 1;
1697 pData->cRedirectionsActive++;
1698 continue;
1699
1700 remove_port_forwarding:
1701 LogRel(("NAT: failed to redirect %s %d => %d\n",
1702 (rule->proto == IPPROTO_UDP?"UDP":"TCP"), rule->host_port, rule->guest_port));
1703 LIST_REMOVE(rule, list);
1704 pData->cRedirectionsStored--;
1705 RTMemFree(rule);
1706 }
1707}
1708
1709/**
1710 * Since 3.1, instead of opening a new socket right away, we only record the
1711 * following information:
1712 * 1. bind IP
1713 * 2. host port
1714 * 3. guest port
1715 * 4. proto
1716 * 5. guest MAC address
1717 * The guest's MAC address is rather important for the service case, but we can easily
1718 * get it from the VM configuration in DrvNAT or the service; the idea is to activate
1719 * the corresponding port forwarding later.
1720 */
1721int slirp_redir(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1722 struct in_addr guest_addr, int guest_port, const uint8_t *ethaddr)
1723{
1724 struct port_forward_rule *rule = NULL;
1725 Assert(memcmp(ethaddr, zerro_ethaddr, ETH_ALEN) == 0);
1726
1727 rule = RTMemAllocZ(sizeof(struct port_forward_rule));
1728 if (rule == NULL)
1729 return 1;
1730
1731 rule->proto = (is_udp ? IPPROTO_UDP : IPPROTO_TCP);
1732 rule->host_port = host_port;
1733 rule->guest_port = guest_port;
1734#ifndef VBOX_WITH_NAT_SERVICE
1735 rule->guest_addr.s_addr = guest_addr.s_addr;
1736#endif
1737 rule->bind_ip.s_addr = host_addr.s_addr;
1738 memcpy(rule->mac_address, ethaddr, ETH_ALEN);
1739 /* @todo add mac address */
1740 LIST_INSERT_HEAD(&pData->port_forward_rule_head, rule, list);
1741 pData->cRedirectionsStored++;
1742 return 0;
1743}
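/*
 * Editor's note: a minimal, hypothetical usage sketch (names and port numbers
 * are illustrative only):
 *
 *     struct in_addr HostAddr, GuestAddr;
 *     inet_aton("0.0.0.0", &HostAddr);        // listen on all host interfaces
 *     inet_aton("10.0.2.15", &GuestAddr);     // typical NAT guest address
 *     slirp_redir(pNat, 0, HostAddr, 2222, GuestAddr, 22, zerro_ethaddr);  // 0 = TCP
 *
 * The call above only records the rule (ports in host byte order); the actual
 * listener and libalias redirect are set up by activate_port_forwarding()
 * once the guest's MAC/IP become known.
 */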
1744
1745void slirp_set_ethaddr_and_activate_port_forwarding(PNATState pData, const uint8_t *ethaddr, uint32_t GuestIP)
1746{
1747#ifndef VBOX_WITH_NAT_SERVICE
1748 memcpy(client_ethaddr, ethaddr, ETH_ALEN);
1749#endif
1750 if (GuestIP != INADDR_ANY)
1751 {
1752 slirp_arp_cache_update_or_add(pData, GuestIP, ethaddr);
1753 activate_port_forwarding(pData, ethaddr);
1754 }
1755}
1756
1757#if defined(RT_OS_WINDOWS)
1758HANDLE *slirp_get_events(PNATState pData)
1759{
1760 return pData->phEvents;
1761}
1762void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index)
1763{
1764 pData->phEvents[index] = hEvent;
1765}
1766#endif
1767
1768unsigned int slirp_get_timeout_ms(PNATState pData)
1769{
1770 if (link_up)
1771 {
1772 if (time_fasttimo)
1773 return 2;
1774 if (do_slowtimo)
1775 return 500; /* see PR_SLOWHZ */
1776 }
1777 return 3600*1000; /* one hour */
1778}
1779
1780#ifndef RT_OS_WINDOWS
1781int slirp_get_nsock(PNATState pData)
1782{
1783 return pData->nsock;
1784}
1785#endif
1786
1787/*
1788 * this function called from NAT thread
1789 */
1790void slirp_post_sent(PNATState pData, void *pvArg)
1791{
1792 struct socket *so = 0;
1793 struct tcpcb *tp = 0;
1794 struct mbuf *m = (struct mbuf *)pvArg;
1795 m_freem(pData, m);
1796}
1797#ifdef VBOX_WITH_SLIRP_MT
1798void slirp_process_queue(PNATState pData)
1799{
1800 RTReqProcess(pData->pReqQueue, RT_INDEFINITE_WAIT);
1801}
1802void *slirp_get_queue(PNATState pData)
1803{
1804 return pData->pReqQueue;
1805}
1806#endif
1807
1808void slirp_set_dhcp_TFTP_prefix(PNATState pData, const char *tftpPrefix)
1809{
1810 Log2(("tftp_prefix:%s\n", tftpPrefix));
1811 tftp_prefix = tftpPrefix;
1812}
1813
1814void slirp_set_dhcp_TFTP_bootfile(PNATState pData, const char *bootFile)
1815{
1816 Log2(("bootFile:%s\n", bootFile));
1817 bootp_filename = bootFile;
1818}
1819
1820void slirp_set_dhcp_next_server(PNATState pData, const char *next_server)
1821{
1822 Log2(("next_server:%s\n", next_server));
1823 if (next_server == NULL)
1824 pData->tftp_server.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_TFTP);
1825 else
1826 inet_aton(next_server, &pData->tftp_server);
1827}
1828
1829int slirp_set_binding_address(PNATState pData, char *addr)
1830{
1831 if (addr == NULL || (inet_aton(addr, &pData->bindIP) == 0))
1832 {
1833 pData->bindIP.s_addr = INADDR_ANY;
1834 return 1;
1835 }
1836 return 0;
1837}
1838
1839void slirp_set_dhcp_dns_proxy(PNATState pData, bool fDNSProxy)
1840{
1841 if (!pData->fUseHostResolver)
1842 {
1843 Log2(("NAT: DNS proxy switched %s\n", (fDNSProxy ? "on" : "off")));
1844 pData->fUseDnsProxy = fDNSProxy;
1845 }
1846 else
1847 LogRel(("NAT: Host Resolver conflicts with DNS proxy, the last one was forcely ignored\n"));
1848}
1849
1850#define CHECK_ARG(name, val, lim_min, lim_max) \
1851 do { \
1852 if ((val) < (lim_min) || (val) > (lim_max)) \
1853 { \
1854 LogRel(("NAT: (" #name ":%d) has been ignored, " \
1855 "because out of range (%d, %d)\n", (val), (lim_min), (lim_max))); \
1856 return; \
1857 } \
1858 else \
1859 LogRel(("NAT: (" #name ":%d)\n", (val))); \
1860 } while (0)
1861
1862/* don't allow the user to set values below 8kB or above 1MB */
1863#define _8K_1M_CHECK_ARG(name, val) CHECK_ARG(name, (val), 8, 1024)
1864void slirp_set_rcvbuf(PNATState pData, int kilobytes)
1865{
1866 _8K_1M_CHECK_ARG("SOCKET_RCVBUF", kilobytes);
1867 pData->socket_rcv = kilobytes;
1868}
1869void slirp_set_sndbuf(PNATState pData, int kilobytes)
1870{
1871 _8K_1M_CHECK_ARG("SOCKET_SNDBUF", kilobytes);
1872 pData->socket_snd = kilobytes * _1K;
1873}
1874void slirp_set_tcp_rcvspace(PNATState pData, int kilobytes)
1875{
1876 _8K_1M_CHECK_ARG("TCP_RCVSPACE", kilobytes);
1877 tcp_rcvspace = kilobytes * _1K;
1878}
1879void slirp_set_tcp_sndspace(PNATState pData, int kilobytes)
1880{
1881 _8K_1M_CHECK_ARG("TCP_SNDSPACE", kilobytes);
1882 tcp_sndspace = kilobytes * _1K;
1883}
1884
1885/*
1886 * Look up the Ethernet address for an IP address in the ARP cache.
1887 * Note: it's the caller's responsibility to allocate the buffer for the result.
1888 * @returns IPRT status code
1889 */
1890int slirp_arp_lookup_ether_by_ip(PNATState pData, uint32_t ip, uint8_t *ether)
1891{
1892 struct arp_cache_entry *ac;
1893
1894 if (ether == NULL)
1895 return VERR_INVALID_PARAMETER;
1896
1897 if (LIST_EMPTY(&pData->arp_cache))
1898 return VERR_NOT_FOUND;
1899
1900 LIST_FOREACH(ac, &pData->arp_cache, list)
1901 {
1902 if (ac->ip == ip)
1903 {
1904 memcpy(ether, ac->ether, ETH_ALEN);
1905 return VINF_SUCCESS;
1906 }
1907 }
1908 return VERR_NOT_FOUND;
1909}
1910
1911/*
1912 * Look up the IP address for an Ethernet address in the ARP cache.
1913 * Note: it's the caller's responsibility to allocate the buffer for the result.
1914 * @returns VINF_SUCCESS if found, VERR_NOT_FOUND otherwise
1915 */
1916int slirp_arp_lookup_ip_by_ether(PNATState pData, const uint8_t *ether, uint32_t *ip)
1917{
1918 struct arp_cache_entry *ac;
1919 *ip = INADDR_ANY;
1920
1921 if (LIST_EMPTY(&pData->arp_cache))
1922 return VERR_NOT_FOUND;
1923
1924 LIST_FOREACH(ac, &pData->arp_cache, list)
1925 {
1926 if (memcmp(ether, ac->ether, ETH_ALEN) == 0)
1927 {
1928 *ip = ac->ip;
1929 return VINF_SUCCESS;
1930 }
1931 }
1932 return VERR_NOT_FOUND;
1933}
1934
1935void slirp_arp_who_has(PNATState pData, uint32_t dst)
1936{
1937 struct mbuf *m;
1938 struct ethhdr *ehdr;
1939 struct arphdr *ahdr;
1940
1941 m = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1942 if (m == NULL)
1943 {
1944 LogRel(("NAT: Can't alloc mbuf for ARP request\n"));
1945 return;
1946 }
1947 ehdr = mtod(m, struct ethhdr *);
1948 memset(ehdr->h_source, 0xff, ETH_ALEN);
1949 ahdr = (struct arphdr *)&ehdr[1];
1950 ahdr->ar_hrd = RT_H2N_U16_C(1);
1951 ahdr->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1952 ahdr->ar_hln = ETH_ALEN;
1953 ahdr->ar_pln = 4;
1954 ahdr->ar_op = RT_H2N_U16_C(ARPOP_REQUEST);
1955 memcpy(ahdr->ar_sha, special_ethaddr, ETH_ALEN);
1956 *(uint32_t *)ahdr->ar_sip = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
1957 memset(ahdr->ar_tha, 0xff, ETH_ALEN); /*broadcast*/
1958 *(uint32_t *)ahdr->ar_tip = dst;
1959 /* warning: must fit within the minimal mbuf size */
1960 m->m_len = sizeof(struct arphdr) + ETH_HLEN;
1961 m->m_data += ETH_HLEN;
1962 m->m_len -= ETH_HLEN;
1963 if_encap(pData, ETH_P_ARP, m, ETH_ENCAP_URG);
1964}
1965
1966int slirp_arp_cache_update_or_add(PNATState pData, uint32_t dst, const uint8_t *mac)
1967{
1968 if (slirp_arp_cache_update(pData, dst, mac))
1969 slirp_arp_cache_add(pData, dst, mac);
1970
1971 return 0;
1972}
1973
1974/* Updates the ARP cache.
1975 * @returns 0 if the entry was found and updated,
1976 * 1 if it wasn't found.
1977 */
1978int slirp_arp_cache_update(PNATState pData, uint32_t dst, const uint8_t *mac)
1979{
1980 struct arp_cache_entry *ac;
1981 LIST_FOREACH(ac, &pData->arp_cache, list)
1982 {
1983 if (memcmp(ac->ether, mac, ETH_ALEN) == 0)
1984 {
1985 ac->ip = dst;
1986 return 0;
1987 }
1988 }
1989 return 1;
1990}
1991
1992void slirp_arp_cache_add(PNATState pData, uint32_t ip, const uint8_t *ether)
1993{
1994 struct arp_cache_entry *ac = NULL;
1995 ac = RTMemAllocZ(sizeof(struct arp_cache_entry));
1996 if (ac == NULL)
1997 {
1998 LogRel(("NAT: Can't allocate arp cache entry\n"));
1999 return;
2000 }
2001 ac->ip = ip;
2002 memcpy(ac->ether, ether, ETH_ALEN);
2003 LIST_INSERT_HEAD(&pData->arp_cache, ac, list);
2004}
2005
2006void slirp_set_mtu(PNATState pData, int mtu)
2007{
2008 if (mtu < 20 || mtu >= 16000)
2009 {
2010 LogRel(("NAT: mtu(%d) is out of range (20;16000] mtu forcely assigned to 1500\n", mtu));
2011 mtu = 1500;
2012 }
2013 if_mtu =
2014 if_mru = mtu;
2015}