VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/slirp.c@ 30353

Last change on this file since 30353 was 30353, checked in by vboxsync, 14 years ago

NAT: fixed m_freem() in arp_input()

1/* $Id: slirp.c 30353 2010-06-22 08:09:41Z vboxsync $ */
2/** @file
3 * NAT - slirp glue.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * libslirp glue
22 *
23 * Copyright (c) 2004-2008 Fabrice Bellard
24 *
25 * Permission is hereby granted, free of charge, to any person obtaining a copy
26 * of this software and associated documentation files (the "Software"), to deal
27 * in the Software without restriction, including without limitation the rights
28 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
29 * copies of the Software, and to permit persons to whom the Software is
30 * furnished to do so, subject to the following conditions:
31 *
32 * The above copyright notice and this permission notice shall be included in
33 * all copies or substantial portions of the Software.
34 *
35 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
38 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
39 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
40 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
41 * THE SOFTWARE.
42 */
43
44#include "slirp.h"
45#ifdef RT_OS_OS2
46# include <paths.h>
47#endif
48
49#include <VBox/err.h>
50#include <VBox/pdmdrv.h>
51#include <iprt/assert.h>
52#include <iprt/file.h>
53#ifndef RT_OS_WINDOWS
54# include <sys/ioctl.h>
55# include <poll.h>
56#else
57# include <Winnls.h>
58# define _WINSOCK2API_
59# include <IPHlpApi.h>
60#endif
61#include <alias.h>
62
63#ifndef RT_OS_WINDOWS
64
65# define DO_ENGAGE_EVENT1(so, fdset, label) \
66 do { \
67 if ( so->so_poll_index != -1 \
68 && so->s == polls[so->so_poll_index].fd) \
69 { \
70 polls[so->so_poll_index].events |= N_(fdset ## _poll); \
71 break; \
72 } \
73 AssertRelease(poll_index < (nfds)); \
74 AssertRelease(poll_index >= 0 && poll_index < (nfds)); \
75 polls[poll_index].fd = (so)->s; \
76 (so)->so_poll_index = poll_index; \
77 polls[poll_index].events = N_(fdset ## _poll); \
78 polls[poll_index].revents = 0; \
79 poll_index++; \
80 } while (0)
81
82# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
83 do { \
84 if ( so->so_poll_index != -1 \
85 && so->s == polls[so->so_poll_index].fd) \
86 { \
87 polls[so->so_poll_index].events |= \
88 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
89 break; \
90 } \
91 AssertRelease(poll_index < (nfds)); \
92 polls[poll_index].fd = (so)->s; \
93 (so)->so_poll_index = poll_index; \
94 polls[poll_index].events = \
95 N_(fdset1 ## _poll) | N_(fdset2 ## _poll); \
96 poll_index++; \
97 } while (0)
98
99# define DO_POLL_EVENTS(rc, error, so, events, label) do {} while (0)
100
101/*
102 * DO_CHECK_FD_SET is used when dumping events on a socket, including POLLNVAL.
103 * gcc warns about attempts to log POLLNVAL, so the construction in the last two
104 * lines is used to catch POLLNVAL while logging and to return false on error
105 * during normal usage.
106 */
107# define DO_CHECK_FD_SET(so, events, fdset) \
108 ( ((so)->so_poll_index != -1) \
109 && ((so)->so_poll_index <= ndfs) \
110 && ((so)->s == polls[so->so_poll_index].fd) \
111 && (polls[(so)->so_poll_index].revents & N_(fdset ## _poll)) \
112 && ( N_(fdset ## _poll) == POLLNVAL \
113 || !(polls[(so)->so_poll_index].revents & POLLNVAL)))
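/*
 * Added illustration (not part of the original source): on a Linux host, where
 * readfds_poll is POLLIN and assuming the N_() wrapper leaves the poll flag
 * unchanged, DO_CHECK_FD_SET(so, events, readfds) effectively evaluates to
 *
 *     so->so_poll_index != -1
 *     && so->so_poll_index <= ndfs
 *     && so->s == polls[so->so_poll_index].fd
 *     && (polls[so->so_poll_index].revents & POLLIN)
 *     && !(polls[so->so_poll_index].revents & POLLNVAL)
 *
 * i.e. the socket still owns its poll slot, data is readable, and the
 * descriptor has not been flagged invalid.
 */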
114
115 /* specific for Unix API */
116# define DO_UNIX_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
117 /* specific for Windows Winsock API */
118# define DO_WIN_CHECK_FD_SET(so, events, fdset) 0
119
120# ifndef RT_OS_LINUX
121# define readfds_poll (POLLRDNORM)
122# define writefds_poll (POLLWRNORM)
123# else
124# define readfds_poll (POLLIN)
125# define writefds_poll (POLLOUT)
126# endif
127# define xfds_poll (POLLPRI)
128# define closefds_poll (POLLHUP)
129# define rderr_poll (POLLERR)
130# define rdhup_poll (POLLHUP)
131# define nval_poll (POLLNVAL)
132
133# define ICMP_ENGAGE_EVENT(so, fdset) \
134 do { \
135 if (pData->icmp_socket.s != -1) \
136 DO_ENGAGE_EVENT1((so), fdset, ICMP); \
137 } while (0)
138
139#else /* RT_OS_WINDOWS */
140
141/*
142 * On Windows, we will be notified by IcmpSendEcho2() when the response arrives.
143 * So no call to WSAEventSelect is necessary.
144 */
145# define ICMP_ENGAGE_EVENT(so, fdset) do {} while (0)
146
147/*
148 * On Windows we use FD_ALL_EVENTS to ensure that we don't miss any event.
149 */
150# define DO_ENGAGE_EVENT1(so, fdset1, label) \
151 do { \
152 rc = WSAEventSelect((so)->s, VBOX_SOCKET_EVENT, FD_ALL_EVENTS); \
153 if (rc == SOCKET_ERROR) \
154 { \
155 /* This should not happen */ \
156 error = WSAGetLastError(); \
157 LogRel(("WSAEventSelect (" #label ") error %d (so=%x, socket=%s, event=%x)\n", \
158 error, (so), (so)->s, VBOX_SOCKET_EVENT)); \
159 } \
160 } while (0); \
161 CONTINUE(label)
162
163# define DO_ENGAGE_EVENT2(so, fdset1, fdset2, label) \
164 DO_ENGAGE_EVENT1((so), (fdset1), label)
165
166# define DO_POLL_EVENTS(rc, error, so, events, label) \
167 (rc) = WSAEnumNetworkEvents((so)->s, VBOX_SOCKET_EVENT, (events)); \
168 if ((rc) == SOCKET_ERROR) \
169 { \
170 (error) = WSAGetLastError(); \
171 LogRel(("WSAEnumNetworkEvents " #label " error %d\n", (error))); \
172 CONTINUE(label); \
173 }
174
175# define acceptds_win FD_ACCEPT
176# define acceptds_win_bit FD_ACCEPT_BIT
177# define readfds_win FD_READ
178# define readfds_win_bit FD_READ_BIT
179# define writefds_win FD_WRITE
180# define writefds_win_bit FD_WRITE_BIT
181# define xfds_win FD_OOB
182# define xfds_win_bit FD_OOB_BIT
183# define closefds_win FD_CLOSE
184# define closefds_win_bit FD_CLOSE_BIT
185
186# define closefds_win FD_CLOSE
187# define closefds_win_bit FD_CLOSE_BIT
188
189# define DO_CHECK_FD_SET(so, events, fdset) \
190 (((events).lNetworkEvents & fdset ## _win) && ((events).iErrorCode[fdset ## _win_bit] == 0))
191
192# define DO_WIN_CHECK_FD_SET(so, events, fdset) DO_CHECK_FD_SET((so), (events), fdset)
193# define DO_UNIX_CHECK_FD_SET(so, events, fdset) 1 /*specific for Unix API */
194
195#endif /* RT_OS_WINDOWS */
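/*
 * Added summary note: the macros above implement the same event engine on two
 * host APIs. On Unix hosts every socket gets a slot in the caller-supplied
 * pollfd array (tracked via so_poll_index) and results are read back from
 * revents; on Windows hosts every socket is attached to the single
 * VBOX_SOCKET_EVENT with WSAEventSelect(FD_ALL_EVENTS) and its status is
 * fetched later with WSAEnumNetworkEvents. The generic TCP_/UDP_ENGAGE_EVENT
 * and CHECK_FD_SET wrappers below hide that difference from the main loop.
 */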
196
197#define TCP_ENGAGE_EVENT1(so, fdset) \
198 DO_ENGAGE_EVENT1((so), fdset, tcp)
199
200#define TCP_ENGAGE_EVENT2(so, fdset1, fdset2) \
201 DO_ENGAGE_EVENT2((so), fdset1, fdset2, tcp)
202
203#define UDP_ENGAGE_EVENT(so, fdset) \
204 DO_ENGAGE_EVENT1((so), fdset, udp)
205
206#define POLL_TCP_EVENTS(rc, error, so, events) \
207 DO_POLL_EVENTS((rc), (error), (so), (events), tcp)
208
209#define POLL_UDP_EVENTS(rc, error, so, events) \
210 DO_POLL_EVENTS((rc), (error), (so), (events), udp)
211
212#define CHECK_FD_SET(so, events, set) \
213 (DO_CHECK_FD_SET((so), (events), set))
214
215#define WIN_CHECK_FD_SET(so, events, set) \
216 (DO_WIN_CHECK_FD_SET((so), (events), set))
217
218#define UNIX_CHECK_FD_SET(so, events, set) \
219 (DO_UNIX_CHECK_FD_SET(so, events, set))
220
221/*
222 * Logging macros
223 */
224#if VBOX_WITH_DEBUG_NAT_SOCKETS
225# if defined(RT_OS_WINDOWS)
226# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
227 do { \
228 LogRel((" " #proto " %R[natsock] %R[natwinnetevents]\n", (so), (winevent))); \
229 } while (0)
230# else /* !RT_OS_WINDOWS */
231# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
232 do { \
233 LogRel((" " #proto " %R[natsock] %s %s %s er: %s, %s, %s\n", (so), \
234 CHECK_FD_SET(so, ign ,r_fdset) ? "READ":"", \
235 CHECK_FD_SET(so, ign, w_fdset) ? "WRITE":"", \
236 CHECK_FD_SET(so, ign, x_fdset) ? "OOB":"", \
237 CHECK_FD_SET(so, ign, rderr) ? "RDERR":"", \
238 CHECK_FD_SET(so, ign, rdhup) ? "RDHUP":"", \
239 CHECK_FD_SET(so, ign, nval) ? "RDNVAL":"")); \
240 } while (0)
241# endif /* !RT_OS_WINDOWS */
242#else /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
243# define DO_LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) do {} while (0)
244#endif /* !VBOX_WITH_DEBUG_NAT_SOCKETS */
245
246#define LOG_NAT_SOCK(so, proto, winevent, r_fdset, w_fdset, x_fdset) \
247 DO_LOG_NAT_SOCK((so), proto, (winevent), r_fdset, w_fdset, x_fdset)
248
249static void activate_port_forwarding(PNATState, const uint8_t *pEther);
250
251static const uint8_t special_ethaddr[6] =
252{
253 0x52, 0x54, 0x00, 0x12, 0x35, 0x00
254};
255
256static const uint8_t broadcast_ethaddr[6] =
257{
258 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
259};
260
261const uint8_t zerro_ethaddr[6] =
262{
263 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
264};
265
266#ifdef RT_OS_WINDOWS
267static int get_dns_addr_domain(PNATState pData, bool fVerbose,
268 struct in_addr *pdns_addr,
269 const char **ppszDomain)
270{
271 ULONG flags = GAA_FLAG_INCLUDE_PREFIX; /*GAA_FLAG_INCLUDE_ALL_INTERFACES;*/ /* all interfaces registered in NDIS */
272 PIP_ADAPTER_ADDRESSES pAdapterAddr = NULL;
273 PIP_ADAPTER_ADDRESSES pAddr = NULL;
274 PIP_ADAPTER_DNS_SERVER_ADDRESS pDnsAddr = NULL;
275 ULONG size;
276 int wlen = 0;
277 char *pszSuffix;
278 struct dns_domain_entry *pDomain = NULL;
279 ULONG ret = ERROR_SUCCESS;
280
281 /* @todo add SKIP flags to fetch only the required information */
282
283 /* determine size of buffer */
284 size = 0;
285 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
286 if (ret != ERROR_BUFFER_OVERFLOW)
287 {
288 LogRel(("NAT: error %lu occurred while detecting the required buffer size\n", ret));
289 return -1;
290 }
291 if (size == 0)
292 {
293 LogRel(("NAT: Windows API returned a zero buffer size\n"));
294 return -1;
295 }
296
297 pAdapterAddr = RTMemAllocZ(size);
298 if (!pAdapterAddr)
299 {
300 LogRel(("NAT: No memory available \n"));
301 return -1;
302 }
303 ret = pData->pfGetAdaptersAddresses(AF_INET, 0, NULL /* reserved */, pAdapterAddr, &size);
304 if (ret != ERROR_SUCCESS)
305 {
306 LogRel(("NAT: error %lu occurred on fetching adapters info\n", ret));
307 RTMemFree(pAdapterAddr);
308 return -1;
309 }
310
311 for (pAddr = pAdapterAddr; pAddr != NULL; pAddr = pAddr->Next)
312 {
313 int found;
314 if (pAddr->OperStatus != IfOperStatusUp)
315 continue;
316
317 for (pDnsAddr = pAddr->FirstDnsServerAddress; pDnsAddr != NULL; pDnsAddr = pDnsAddr->Next)
318 {
319 struct sockaddr *SockAddr = pDnsAddr->Address.lpSockaddr;
320 struct in_addr InAddr;
321 struct dns_entry *pDns;
322
323 if (SockAddr->sa_family != AF_INET)
324 continue;
325
326 InAddr = ((struct sockaddr_in *)SockAddr)->sin_addr;
327
328 /* add dns server to list */
329 pDns = RTMemAllocZ(sizeof(struct dns_entry));
330 if (!pDns)
331 {
332 LogRel(("NAT: Can't allocate buffer for DNS entry\n"));
333 RTMemFree(pAdapterAddr);
334 return VERR_NO_MEMORY;
335 }
336
337 LogRel(("NAT: adding %R[IP4] to DNS server list\n", &InAddr));
338 if ((InAddr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
339 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
340 else
341 pDns->de_addr.s_addr = InAddr.s_addr;
342
343 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
344
345 if (pAddr->DnsSuffix == NULL)
346 continue;
347
348 /* add each DNS suffix only once (keep the list unique) */
349 RTUtf16ToUtf8(pAddr->DnsSuffix, &pszSuffix);
350 if (!pszSuffix || strlen(pszSuffix) == 0)
351 {
352 RTStrFree(pszSuffix);
353 continue;
354 }
355
356 found = 0;
357 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
358 {
359 if ( pDomain->dd_pszDomain != NULL
360 && strcmp(pDomain->dd_pszDomain, pszSuffix) == 0)
361 {
362 found = 1;
363 RTStrFree(pszSuffix);
364 break;
365 }
366 }
367 if (!found)
368 {
369 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
370 if (!pDomain)
371 {
372 LogRel(("NAT: not enough memory\n"));
373 RTStrFree(pszSuffix);
374 RTMemFree(pAdapterAddr);
375 return VERR_NO_MEMORY;
376 }
377 pDomain->dd_pszDomain = pszSuffix;
378 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
379 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
380 }
381 }
382 }
383 RTMemFree(pAdapterAddr);
384 return 0;
385}
386
387#else /* !RT_OS_WINDOWS */
388
389static int RTFileGets(RTFILE File, void *pvBuf, size_t cbBufSize, size_t *pcbRead)
390{
391 size_t cbRead;
392 char bTest;
393 int rc = VERR_NO_MEMORY;
394 char *pu8Buf = (char *)pvBuf;
395 *pcbRead = 0;
396
397 while ( RT_SUCCESS(rc = RTFileRead(File, &bTest, 1, &cbRead))
398 && (pu8Buf - (char *)pvBuf) < cbBufSize)
399 {
400 if (cbRead == 0)
401 return VERR_EOF;
402
403 if (bTest == '\r' || bTest == '\n')
404 {
405 *pu8Buf = 0;
406 return VINF_SUCCESS;
407 }
408 *pu8Buf = bTest;
409 pu8Buf++;
410 (*pcbRead)++;
411 }
412 return rc;
413}
414
415static int get_dns_addr_domain(PNATState pData, bool fVerbose,
416 struct in_addr *pdns_addr,
417 const char **ppszDomain)
418{
419 char buff[512];
420 char buff2[256];
421 RTFILE f;
422 int cNameserversFound = 0;
423 int fWarnTooManyDnsServers = 0;
424 struct in_addr tmp_addr;
425 int rc;
426 size_t bytes;
427
428# ifdef RT_OS_OS2
429 /* Try various locations. */
430 char *etc = getenv("ETC");
431 if (etc)
432 {
433 RTStrmPrintf(buff, sizeof(buff), "%s/RESOLV2", etc);
434 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
435 }
436 if (RT_FAILURE(rc))
437 {
438 RTStrmPrintf(buff, sizeof(buff), "%s/RESOLV2", _PATH_ETC);
439 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
440 }
441 if (RT_FAILURE(rc))
442 {
443 RTStrmPrintf(buff, sizeof(buff), "%s/resolv.conf", _PATH_ETC);
444 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
445 }
446# else /* !RT_OS_OS2 */
447# ifndef DEBUG_vvl
448 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
449# else
450 char *home = getenv("HOME");
451 RTStrPrintf(buff, sizeof(buff), "%s/resolv.conf", home);
452 rc = RTFileOpen(&f, buff, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
453 if (RT_SUCCESS(rc))
454 {
455 Log(("NAT: DNS we're using %s\n", buff));
456 }
457 else
458 {
459 rc = RTFileOpen(&f, "/etc/resolv.conf", RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
460 Log(("NAT: DNS we're using /etc/resolv.conf\n"));
461 }
462# endif
463# endif /* !RT_OS_OS2 */
464 if (RT_FAILURE(rc))
465 return -1;
466
467 if (ppszDomain)
468 *ppszDomain = NULL;
469
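    /*
     * Added illustration (addresses below are made up): a typical
     * /etc/resolv.conf this loop understands looks like
     *
     *     nameserver 192.168.1.1
     *     nameserver 8.8.8.8
     *     search example.org
     *
     * "nameserver" lines (at most 4) become dns_entry records on pData->pDnsList;
     * the first token of a "domain"/"search" line becomes a dns_domain_entry on
     * pData->pDomainList.
     */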
470 Log(("NAT: DNS Servers:\n"));
471 while ( RT_SUCCESS(rc = RTFileGets(f, buff, sizeof(buff), &bytes))
472 && rc != VERR_EOF)
473 {
474 struct dns_entry *pDns = NULL;
475 if ( cNameserversFound == 4
476 && fWarnTooManyDnsServers == 0
477 && sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1)
478 {
479 fWarnTooManyDnsServers = 1;
480 LogRel(("NAT: too many nameservers registered.\n"));
481 }
482 if ( sscanf(buff, "nameserver%*[ \t]%255s", buff2) == 1
483 && cNameserversFound < 4) /* Unix doesn't accept more than 4 name servers*/
484 {
485 if (!inet_aton(buff2, &tmp_addr))
486 continue;
487
488 /* localhost mask */
489 pDns = RTMemAllocZ(sizeof (struct dns_entry));
490 if (!pDns)
491 {
492 LogRel(("NAT: can't allocate memory for DNS entry\n"));
493 return -1;
494 }
495
496 /* check */
497 pDns->de_addr.s_addr = tmp_addr.s_addr;
498 if ((pDns->de_addr.s_addr & RT_H2N_U32_C(IN_CLASSA_NET)) == RT_N2H_U32_C(INADDR_LOOPBACK & IN_CLASSA_NET))
499 {
500 pDns->de_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
501 }
502 TAILQ_INSERT_HEAD(&pData->pDnsList, pDns, de_list);
503 cNameserversFound++;
504 }
505 if ((!strncmp(buff, "domain", 6) || !strncmp(buff, "search", 6)))
506 {
507 char *tok;
508 char *saveptr;
509 struct dns_domain_entry *pDomain = NULL;
510 int fFoundDomain = 0;
511 tok = strtok_r(&buff[6], " \t\n", &saveptr);
512 LIST_FOREACH(pDomain, &pData->pDomainList, dd_list)
513 {
514 if ( tok != NULL
515 && strcmp(tok, pDomain->dd_pszDomain) == 0)
516 {
517 fFoundDomain = 1;
518 break;
519 }
520 }
521 if (tok != NULL && !fFoundDomain)
522 {
523 pDomain = RTMemAllocZ(sizeof(struct dns_domain_entry));
524 if (!pDomain)
525 {
526 LogRel(("NAT: not enough memory to add domain to the search list\n"));
527 return VERR_NO_MEMORY;
528 }
529 pDomain->dd_pszDomain = RTStrDup(tok);
530 LogRel(("NAT: adding domain name %s to search list\n", pDomain->dd_pszDomain));
531 LIST_INSERT_HEAD(&pData->pDomainList, pDomain, dd_list);
532 }
533 }
534 }
535 RTFileClose(f);
536 if (!cNameserversFound)
537 return -1;
538 return 0;
539}
540
541#endif /* !RT_OS_WINDOWS */
542
543int slirp_init_dns_list(PNATState pData)
544{
545 TAILQ_INIT(&pData->pDnsList);
546 LIST_INIT(&pData->pDomainList);
547 return get_dns_addr_domain(pData, true, NULL, NULL);
548}
549
550void slirp_release_dns_list(PNATState pData)
551{
552 struct dns_entry *pDns = NULL;
553 struct dns_domain_entry *pDomain = NULL;
554
555 while (!TAILQ_EMPTY(&pData->pDnsList))
556 {
557 pDns = TAILQ_FIRST(&pData->pDnsList);
558 TAILQ_REMOVE(&pData->pDnsList, pDns, de_list);
559 RTMemFree(pDns);
560 }
561
562 while (!LIST_EMPTY(&pData->pDomainList))
563 {
564 pDomain = LIST_FIRST(&pData->pDomainList);
565 LIST_REMOVE(pDomain, dd_list);
566 if (pDomain->dd_pszDomain != NULL)
567 RTStrFree(pDomain->dd_pszDomain);
568 RTMemFree(pDomain);
569 }
570}
571
572int get_dns_addr(PNATState pData, struct in_addr *pdns_addr)
573{
574 return get_dns_addr_domain(pData, false, pdns_addr, NULL);
575}
576
577int slirp_init(PNATState *ppData, uint32_t u32NetAddr, uint32_t u32Netmask,
578 bool fPassDomain, bool fUseHostResolver, int i32AliasMode, void *pvUser)
579{
580 int fNATfailed = 0;
581 int rc;
582 PNATState pData = RTMemAllocZ(sizeof(NATState));
583 *ppData = pData;
584 if (!pData)
585 return VERR_NO_MEMORY;
586 if (u32Netmask & 0x1f)
587 /* CTL is x.x.x.15, bootp passes up to 16 IPs (15..31) */
588 return VERR_INVALID_PARAMETER;
589 pData->fPassDomain = !fUseHostResolver ? fPassDomain : false;
590 pData->fUseHostResolver = fUseHostResolver;
591 pData->pvUser = pvUser;
592 pData->netmask = u32Netmask;
593
594 /* sockets & TCP defaults */
595 pData->socket_rcv = 64 * _1K;
596 pData->socket_snd = 64 * _1K;
597 tcp_sndspace = 64 * _1K;
598 tcp_rcvspace = 64 * _1K;
599
600#ifdef RT_OS_WINDOWS
601 {
602 WSADATA Data;
603 WSAStartup(MAKEWORD(2, 0), &Data);
604 }
605 pData->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
606#endif
607#ifdef VBOX_WITH_SLIRP_MT
608 QSOCKET_LOCK_CREATE(tcb);
609 QSOCKET_LOCK_CREATE(udb);
610 rc = RTReqCreateQueue(&pData->pReqQueue);
611 AssertReleaseRC(rc);
612#endif
613
614 link_up = 1;
615
616 rc = bootp_dhcp_init(pData);
617 if (rc != 0)
618 {
619 LogRel(("NAT: DHCP server initialization failed\n"));
620 return VINF_NAT_DNS;
621 }
622 debug_init();
623 if_init(pData);
624 ip_init(pData);
625 icmp_init(pData);
626
627 /* Initialise mbufs *after* setting the MTU */
628 mbuf_init(pData);
629
630 pData->special_addr.s_addr = u32NetAddr;
631 pData->slirp_ethaddr = &special_ethaddr[0];
632 alias_addr.s_addr = pData->special_addr.s_addr | RT_H2N_U32_C(CTL_ALIAS);
633 /* @todo add the ability to configure this stuff */
634
635 /* set default addresses */
636 inet_aton("127.0.0.1", &loopback_addr);
637 if (!pData->fUseHostResolver)
638 {
639 if (slirp_init_dns_list(pData) < 0)
640 fNATfailed = 1;
641
642 dnsproxy_init(pData);
643 }
644 if (i32AliasMode & ~(PKT_ALIAS_LOG|PKT_ALIAS_SAME_PORTS|PKT_ALIAS_PROXY_ONLY))
645 {
646 LogRel(("NAT: alias mode %x is ignored\n", i32AliasMode));
647 i32AliasMode = 0;
648 }
649 pData->i32AliasMode = i32AliasMode;
650 getouraddr(pData);
651 {
652 int flags = 0;
653 struct in_addr proxy_addr;
654 pData->proxy_alias = LibAliasInit(pData, NULL);
655 if (pData->proxy_alias == NULL)
656 {
657 LogRel(("NAT: LibAlias default rule wasn't initialized\n"));
658 AssertMsgFailed(("NAT: LibAlias default rule wasn't initialized\n"));
659 }
660 flags = LibAliasSetMode(pData->proxy_alias, 0, 0);
661#ifndef NO_FW_PUNCH
662 flags |= PKT_ALIAS_PUNCH_FW;
663#endif
664 flags |= pData->i32AliasMode; /* do transparent proxying */
665 flags = LibAliasSetMode(pData->proxy_alias, flags, ~0);
666 proxy_addr.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
667 LibAliasSetAddress(pData->proxy_alias, proxy_addr);
668 ftp_alias_load(pData);
669 nbt_alias_load(pData);
670 if (pData->fUseHostResolver)
671 dns_alias_load(pData);
672 }
673 return fNATfailed ? VINF_NAT_DNS : VINF_SUCCESS;
674}
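/*
 * Added usage sketch (an assumption-based outline, not taken from this file):
 * the driver side (e.g. DrvNAT) is expected to wire the entry points defined
 * here roughly as follows, shown with the non-Windows signatures:
 *
 *     PNATState pData;
 *     int rc = slirp_init(&pData, u32NetAddr, u32Netmask, fPassDomain,
 *                         false, 0, pvUser);   // no host resolver, default alias mode
 *     if (RT_SUCCESS(rc))
 *     {
 *         slirp_link_up(pData);
 *         for (;;)    // event loop
 *         {
 *             slirp_select_fill(pData, &nfds, polls);
 *             // wait on the descriptors, e.g. poll(polls, nfds, slirp_get_timeout_ms(pData))
 *             slirp_select_poll(pData, polls, nfds);
 *         }
 *         slirp_link_down(pData);
 *         slirp_term(pData);
 *     }
 */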
675
676/**
677 * Register statistics.
678 */
679void slirp_register_statistics(PNATState pData, PPDMDRVINS pDrvIns)
680{
681#ifdef VBOX_WITH_STATISTICS
682# define PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
683# define COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pData, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
684# include "counters.h"
685# undef COUNTER
686/** @todo register statistics for the variables dumped by:
687 * ipstats(pData); tcpstats(pData); udpstats(pData); icmpstats(pData);
688 * mbufstats(pData); sockstats(pData); */
689#endif /* VBOX_WITH_STATISTICS */
690}
691
692/**
693 * Deregister statistics.
694 */
695void slirp_deregister_statistics(PNATState pData, PPDMDRVINS pDrvIns)
696{
697 if (pData == NULL)
698 return;
699#ifdef VBOX_WITH_STATISTICS
700# define PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
701# define COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pData)
702# include "counters.h"
703#endif /* VBOX_WITH_STATISTICS */
704}
705
706/**
707 * Marks the link as up, making it possible to establish new connections.
708 */
709void slirp_link_up(PNATState pData)
710{
711 struct arp_cache_entry *ac;
712 link_up = 1;
713
714 if (LIST_EMPTY(&pData->arp_cache))
715 return;
716
717 LIST_FOREACH(ac, &pData->arp_cache, list)
718 {
719 activate_port_forwarding(pData, ac->ether);
720 }
721}
722
723/**
724 * Marks the link as down and cleans up the current connections.
725 */
726void slirp_link_down(PNATState pData)
727{
728 struct socket *so;
729 struct port_forward_rule *rule;
730
731 while ((so = tcb.so_next) != &tcb)
732 {
733 if (so->so_state & SS_NOFDREF || so->s == -1)
734 sofree(pData, so);
735 else
736 tcp_drop(pData, sototcpcb(so), 0);
737 }
738
739 while ((so = udb.so_next) != &udb)
740 udp_detach(pData, so);
741
742 /*
743 * Clear the active state of port-forwarding rules to force
744 * re-setup on restoration of communications.
745 */
746 LIST_FOREACH(rule, &pData->port_forward_rule_head, list)
747 {
748 rule->activated = 0;
749 }
750 pData->cRedirectionsActive = 0;
751
752 link_up = 0;
753}
754
755/**
756 * Terminates the slirp component.
757 */
758void slirp_term(PNATState pData)
759{
760 if (pData == NULL)
761 return;
762#ifdef RT_OS_WINDOWS
763 pData->pfIcmpCloseHandle(pData->icmp_socket.sh);
764 FreeLibrary(pData->hmIcmpLibrary);
765 RTMemFree(pData->pvIcmpBuffer);
766#else
767 closesocket(pData->icmp_socket.s);
768#endif
769
770 slirp_link_down(pData);
771 slirp_release_dns_list(pData);
772 ftp_alias_unload(pData);
773 nbt_alias_unload(pData);
774 if (pData->fUseHostResolver)
775 dns_alias_unload(pData);
776 while (!LIST_EMPTY(&instancehead))
777 {
778 struct libalias *la = LIST_FIRST(&instancehead);
779 /* libalias do all clean up */
780 LibAliasUninit(la);
781 }
782 while (!LIST_EMPTY(&pData->arp_cache))
783 {
784 struct arp_cache_entry *ac = LIST_FIRST(&pData->arp_cache);
785 LIST_REMOVE(ac, list);
786 RTMemFree(ac);
787 }
788 bootp_dhcp_fini(pData);
789 m_fini(pData);
790#ifdef RT_OS_WINDOWS
791 WSACleanup();
792#endif
793#ifndef VBOX_WITH_SLIRP_BSD_SBUF
794#ifdef LOG_ENABLED
795 Log(("\n"
796 "NAT statistics\n"
797 "--------------\n"
798 "\n"));
799 ipstats(pData);
800 tcpstats(pData);
801 udpstats(pData);
802 icmpstats(pData);
803 mbufstats(pData);
804 sockstats(pData);
805 Log(("\n"
806 "\n"
807 "\n"));
808#endif
809#endif
810 RTMemFree(pData);
811}
812
813
814#define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
815#define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
816
817/*
818 * curtime kept to an accuracy of 1ms
819 */
820static void updtime(PNATState pData)
821{
822#ifdef RT_OS_WINDOWS
823 struct _timeb tb;
824
825 _ftime(&tb);
826 curtime = (u_int)tb.time * (u_int)1000;
827 curtime += (u_int)tb.millitm;
828#else
829 gettimeofday(&tt, 0);
830
831 curtime = (u_int)tt.tv_sec * (u_int)1000;
832 curtime += (u_int)tt.tv_usec / (u_int)1000;
833
834 if ((tt.tv_usec % 1000) >= 500)
835 curtime++;
836#endif
837}
838
839#ifdef RT_OS_WINDOWS
840void slirp_select_fill(PNATState pData, int *pnfds)
841#else /* RT_OS_WINDOWS */
842void slirp_select_fill(PNATState pData, int *pnfds, struct pollfd *polls)
843#endif /* !RT_OS_WINDOWS */
844{
845 struct socket *so, *so_next;
846 int nfds;
847#if defined(RT_OS_WINDOWS)
848 int rc;
849 int error;
850#else
851 int poll_index = 0;
852#endif
853 int i;
854
855 STAM_PROFILE_START(&pData->StatFill, a);
856
857 nfds = *pnfds;
858
859 /*
860 * First, TCP sockets
861 */
862 do_slowtimo = 0;
863 if (!link_up)
864 goto done;
865
866 /*
867 * *_slowtimo needs calling if there are IP fragments
868 * in the fragment queue, or there are TCP connections active
869 */
870 /* XXX:
871 * triggering of fragment expiration should be the same but use the new macros
872 */
873 do_slowtimo = (tcb.so_next != &tcb);
874 if (!do_slowtimo)
875 {
876 for (i = 0; i < IPREASS_NHASH; i++)
877 {
878 if (!TAILQ_EMPTY(&ipq[i]))
879 {
880 do_slowtimo = 1;
881 break;
882 }
883 }
884 }
885 /* always add the ICMP socket */
886#ifndef RT_OS_WINDOWS
887 pData->icmp_socket.so_poll_index = -1;
888#endif
889 ICMP_ENGAGE_EVENT(&pData->icmp_socket, readfds);
890
891 STAM_COUNTER_RESET(&pData->StatTCP);
892 STAM_COUNTER_RESET(&pData->StatTCPHot);
893
894 QSOCKET_FOREACH(so, so_next, tcp)
895 /* { */
896#if !defined(RT_OS_WINDOWS)
897 so->so_poll_index = -1;
898#endif
899 STAM_COUNTER_INC(&pData->StatTCP);
900
901 /*
902 * See if we need a tcp_fasttimo
903 */
904 if ( time_fasttimo == 0
905 && so->so_tcpcb != NULL
906 && so->so_tcpcb->t_flags & TF_DELACK)
907 {
908 time_fasttimo = curtime; /* Flag when we want a fasttimo */
909 }
910
911 /*
912 * NOFDREF can include sockets still connecting to localhost,
913 * newly socreate()'d sockets, etc. We don't want to select these.
914 */
915 if (so->so_state & SS_NOFDREF || so->s == -1)
916 CONTINUE(tcp);
917
918 /*
919 * Set for reading sockets which are accepting
920 */
921 if (so->so_state & SS_FACCEPTCONN)
922 {
923 STAM_COUNTER_INC(&pData->StatTCPHot);
924 TCP_ENGAGE_EVENT1(so, readfds);
925 CONTINUE(tcp);
926 }
927
928 /*
929 * Set for writing sockets which are connecting
930 */
931 if (so->so_state & SS_ISFCONNECTING)
932 {
933 Log2(("connecting %R[natsock] engaged\n",so));
934 STAM_COUNTER_INC(&pData->StatTCPHot);
935 TCP_ENGAGE_EVENT1(so, writefds);
936 }
937
938 /*
939 * Set for writing if we are connected, can send more, and
940 * we have something to send
941 */
942 if (CONN_CANFSEND(so) && SBUF_LEN(&so->so_rcv))
943 {
944 STAM_COUNTER_INC(&pData->StatTCPHot);
945 TCP_ENGAGE_EVENT1(so, writefds);
946 }
947
948 /*
949 * Set for reading (and urgent data) if we are connected, can
950 * receive more, and we have room for it XXX /2 ?
951 */
952 /* @todo vvl - check which predicate will be more useful here in terms of the new sbufs. */
953 if (CONN_CANFRCV(so) && (SBUF_LEN(&so->so_snd) < (SBUF_SIZE(&so->so_snd)/2)))
954 {
955 STAM_COUNTER_INC(&pData->StatTCPHot);
956 TCP_ENGAGE_EVENT2(so, readfds, xfds);
957 }
958 LOOP_LABEL(tcp, so, so_next);
959 }
960
961 /*
962 * UDP sockets
963 */
964 STAM_COUNTER_RESET(&pData->StatUDP);
965 STAM_COUNTER_RESET(&pData->StatUDPHot);
966
967 QSOCKET_FOREACH(so, so_next, udp)
968 /* { */
969
970 STAM_COUNTER_INC(&pData->StatUDP);
971#if !defined(RT_OS_WINDOWS)
972 so->so_poll_index = -1;
973#endif
974
975 /*
976 * See if it's timed out
977 */
978 if (so->so_expire)
979 {
980 if (so->so_expire <= curtime)
981 {
982 Log2(("NAT: %R[natsock] expired\n", so));
983 if (so->so_timeout != NULL)
984 {
985 so->so_timeout(pData, so, so->so_timeout_arg);
986 }
987#ifdef VBOX_WITH_SLIRP_MT
988 /* we need so_next to continue our loop */
989 so_next = so->so_next;
990#endif
991 UDP_DETACH(pData, so, so_next);
992 CONTINUE_NO_UNLOCK(udp);
993 }
994 else
995 {
996 do_slowtimo = 1; /* Let socket expire */
997 }
998 }
999
1000 /*
1001 * When UDP packets are received from over the link, they're
1002 * sendto()'d straight away, so no need for setting for writing
1003 * Limit the number of packets queued by this session to 4.
1004 * Note that even though we try and limit this to 4 packets,
1005 * the session could have more queued if the packets needed
1006 * to be fragmented.
1007 *
1008 * (XXX <= 4 ?)
1009 */
1010 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4)
1011 {
1012 STAM_COUNTER_INC(&pData->StatUDPHot);
1013 UDP_ENGAGE_EVENT(so, readfds);
1014 }
1015 LOOP_LABEL(udp, so, so_next);
1016 }
1017done:
1018
1019#if defined(RT_OS_WINDOWS)
1020 *pnfds = VBOX_EVENT_COUNT;
1021#else /* RT_OS_WINDOWS */
1022 AssertRelease(poll_index <= *pnfds);
1023 *pnfds = poll_index;
1024#endif /* !RT_OS_WINDOWS */
1025
1026 STAM_PROFILE_STOP(&pData->StatFill, a);
1027}
1028
1029#if defined(RT_OS_WINDOWS)
1030void slirp_select_poll(PNATState pData, int fTimeout, int fIcmp)
1031#else /* RT_OS_WINDOWS */
1032void slirp_select_poll(PNATState pData, struct pollfd *polls, int ndfs)
1033#endif /* !RT_OS_WINDOWS */
1034{
1035 struct socket *so, *so_next;
1036 int ret;
1037#if defined(RT_OS_WINDOWS)
1038 WSANETWORKEVENTS NetworkEvents;
1039 int rc;
1040 int error;
1041#else
1042 int poll_index = 0;
1043#endif
1044
1045 STAM_PROFILE_START(&pData->StatPoll, a);
1046
1047 /* Update time */
1048 updtime(pData);
1049
1050 /*
1051 * See if anything has timed out
1052 */
1053 if (link_up)
1054 {
1055 if (time_fasttimo && ((curtime - time_fasttimo) >= 2))
1056 {
1057 STAM_PROFILE_START(&pData->StatFastTimer, b);
1058 tcp_fasttimo(pData);
1059 time_fasttimo = 0;
1060 STAM_PROFILE_STOP(&pData->StatFastTimer, b);
1061 }
1062 if (do_slowtimo && ((curtime - last_slowtimo) >= 499))
1063 {
1064 STAM_PROFILE_START(&pData->StatSlowTimer, c);
1065 ip_slowtimo(pData);
1066 tcp_slowtimo(pData);
1067 last_slowtimo = curtime;
1068 STAM_PROFILE_STOP(&pData->StatSlowTimer, c);
1069 }
1070 }
1071#if defined(RT_OS_WINDOWS)
1072 if (fTimeout)
1073 return; /* only timer update */
1074#endif
1075
1076 /*
1077 * Check sockets
1078 */
1079 if (!link_up)
1080 goto done;
1081#if defined(RT_OS_WINDOWS)
1082 /* XXX: before renaming, please see the definition of
1083 * fIcmp in slirp_state.h
1084 */
1085 if (fIcmp)
1086 sorecvfrom(pData, &pData->icmp_socket);
1087#else
1088 if ( (pData->icmp_socket.s != -1)
1089 && CHECK_FD_SET(&pData->icmp_socket, ignored, readfds))
1090 sorecvfrom(pData, &pData->icmp_socket);
1091#endif
1092 /*
1093 * Check TCP sockets
1094 */
1095 QSOCKET_FOREACH(so, so_next, tcp)
1096 /* { */
1097
1098#ifdef VBOX_WITH_SLIRP_MT
1099 if ( so->so_state & SS_NOFDREF
1100 && so->so_deleted == 1)
1101 {
1102 struct socket *son, *sop = NULL;
1103 QSOCKET_LOCK(tcb);
1104 if (so->so_next != NULL)
1105 {
1106 if (so->so_next != &tcb)
1107 SOCKET_LOCK(so->so_next);
1108 son = so->so_next;
1109 }
1110 if ( so->so_prev != &tcb
1111 && so->so_prev != NULL)
1112 {
1113 SOCKET_LOCK(so->so_prev);
1114 sop = so->so_prev;
1115 }
1116 QSOCKET_UNLOCK(tcb);
1117 remque(pData, so);
1118 NSOCK_DEC();
1119 SOCKET_UNLOCK(so);
1120 SOCKET_LOCK_DESTROY(so);
1121 RTMemFree(so);
1122 so_next = son;
1123 if (sop != NULL)
1124 SOCKET_UNLOCK(sop);
1125 CONTINUE_NO_UNLOCK(tcp);
1126 }
1127#endif
1128 /*
1129 * FD_ISSET is meaningless on these sockets
1130 * (and they can crash the program)
1131 */
1132 if (so->so_state & SS_NOFDREF || so->s == -1)
1133 CONTINUE(tcp);
1134
1135 POLL_TCP_EVENTS(rc, error, so, &NetworkEvents);
1136
1137 LOG_NAT_SOCK(so, TCP, &NetworkEvents, readfds, writefds, xfds);
1138
1139
1140 /*
1141 * Check for URG data
1142 * This will soread as well, so no need to
1143 * test for readfds below if this succeeds
1144 */
1145
1146 /* out-of-band data */
1147 if ( CHECK_FD_SET(so, NetworkEvents, xfds)
1148#ifdef RT_OS_DARWIN
1149 /* Darwin (and probably other BSD hosts) generates a POLLPRI|POLLHUP event on receiving TCP segments with the
1150 * {ACK|URG|FIN} flag combination; on other Unix hosts this combination doesn't reach this branch.
1151 */
1152 && !CHECK_FD_SET(so, NetworkEvents, closefds)
1153#endif
1154 )
1155 {
1156 sorecvoob(pData, so);
1157 }
1158
1159 /*
1160 * Check sockets for reading
1161 */
1162 else if ( CHECK_FD_SET(so, NetworkEvents, readfds)
1163 || WIN_CHECK_FD_SET(so, NetworkEvents, acceptds))
1164 {
1165 /*
1166 * Check for incoming connections
1167 */
1168 if (so->so_state & SS_FACCEPTCONN)
1169 {
1170 TCP_CONNECT(pData, so);
1171 if (!CHECK_FD_SET(so, NetworkEvents, closefds))
1172 CONTINUE(tcp);
1173 }
1174
1175 ret = soread(pData, so);
1176 /* Output it if we read something */
1177 if (RT_LIKELY(ret > 0))
1178 TCP_OUTPUT(pData, sototcpcb(so));
1179 }
1180
1181 /*
1182 * Check for FD_CLOSE events.
1183 * In some cases, once FD_CLOSE has been signalled on a socket, the event may get flushed later (for some reason); so_close is used to remember it.
1184 */
1185 if ( CHECK_FD_SET(so, NetworkEvents, closefds)
1186 || (so->so_close == 1))
1187 {
1188 /*
1189 * drain the socket
1190 */
1191 for (;;)
1192 {
1193 ret = soread(pData, so);
1194 if (ret > 0)
1195 TCP_OUTPUT(pData, sototcpcb(so));
1196 else
1197 {
1198 Log2(("%R[natsock] errno %d:%s\n", so, errno, strerror(errno)));
1199 break;
1200 }
1201 }
1202 /* mark the socket for termination _after_ it was drained */
1203 so->so_close = 1;
1204 CONTINUE(tcp);
1205 }
1206
1207 /*
1208 * Check sockets for writing
1209 */
1210 if (CHECK_FD_SET(so, NetworkEvents, writefds))
1211 {
1212 /*
1213 * Check for non-blocking, still-connecting sockets
1214 */
1215 if (so->so_state & SS_ISFCONNECTING)
1216 {
1217 Log2(("connecting %R[natsock] caught\n", so));
1218 /* Connected */
1219 so->so_state &= ~SS_ISFCONNECTING;
1220
1221 /*
1222 * This should probably be guarded by PROBE_CONN too. Anyway,
1223 * we disable it on OS/2 because the below send call returns
1224 * EFAULT which causes the opened TCP socket to close right
1225 * after it has been opened and connected.
1226 */
1227#ifndef RT_OS_OS2
1228 ret = send(so->s, (const char *)&ret, 0, 0);
1229 if (ret < 0)
1230 {
1231 /* XXXXX Must fix, zero bytes is a NOP */
1232 if ( errno == EAGAIN
1233 || errno == EWOULDBLOCK
1234 || errno == EINPROGRESS
1235 || errno == ENOTCONN)
1236 CONTINUE(tcp);
1237
1238 /* else failed */
1239 so->so_state = SS_NOFDREF;
1240 }
1241 /* else so->so_state &= ~SS_ISFCONNECTING; */
1242#endif
1243
1244 /*
1245 * Continue tcp_input
1246 */
1247 TCP_INPUT(pData, (struct mbuf *)NULL, sizeof(struct ip), so);
1248 /* continue; */
1249 }
1250 else
1251 SOWRITE(ret, pData, so);
1252 /*
1253 * XXX If we wrote something (a lot), there could be the need
1254 * for a window update. In the worst case, the remote will send
1255 * a window probe to get things going again.
1256 */
1257 }
1258
1259 /*
1260 * Probe a still-connecting, non-blocking socket
1261 * to check if it's still alive
1262 */
1263#ifdef PROBE_CONN
1264 if (so->so_state & SS_ISFCONNECTING)
1265 {
1266 ret = recv(so->s, (char *)&ret, 0, 0);
1267
1268 if (ret < 0)
1269 {
1270 /* XXX */
1271 if ( errno == EAGAIN
1272 || errno == EWOULDBLOCK
1273 || errno == EINPROGRESS
1274 || errno == ENOTCONN)
1275 {
1276 CONTINUE(tcp); /* Still connecting, continue */
1277 }
1278
1279 /* else failed */
1280 so->so_state = SS_NOFDREF;
1281
1282 /* tcp_input will take care of it */
1283 }
1284 else
1285 {
1286 ret = send(so->s, &ret, 0, 0);
1287 if (ret < 0)
1288 {
1289 /* XXX */
1290 if ( errno == EAGAIN
1291 || errno == EWOULDBLOCK
1292 || errno == EINPROGRESS
1293 || errno == ENOTCONN)
1294 {
1295 CONTINUE(tcp);
1296 }
1297 /* else failed */
1298 so->so_state = SS_NOFDREF;
1299 }
1300 else
1301 so->so_state &= ~SS_ISFCONNECTING;
1302
1303 }
1304 TCP_INPUT((struct mbuf *)NULL, sizeof(struct ip),so);
1305 } /* SS_ISFCONNECTING */
1306#endif
1307 LOOP_LABEL(tcp, so, so_next);
1308 }
1309
1310 /*
1311 * Now UDP sockets.
1312 * Incoming packets are sent straight away, they're not buffered.
1313 * Incoming UDP data isn't buffered either.
1314 */
1315 QSOCKET_FOREACH(so, so_next, udp)
1316 /* { */
1317#ifdef VBOX_WITH_SLIRP_MT
1318 if ( so->so_state & SS_NOFDREF
1319 && so->so_deleted == 1)
1320 {
1321 struct socket *son, *sop = NULL;
1322 QSOCKET_LOCK(udb);
1323 if (so->so_next != NULL)
1324 {
1325 if (so->so_next != &udb)
1326 SOCKET_LOCK(so->so_next);
1327 son = so->so_next;
1328 }
1329 if ( so->so_prev != &udb
1330 && so->so_prev != NULL)
1331 {
1332 SOCKET_LOCK(so->so_prev);
1333 sop = so->so_prev;
1334 }
1335 QSOCKET_UNLOCK(udb);
1336 remque(pData, so);
1337 NSOCK_DEC();
1338 SOCKET_UNLOCK(so);
1339 SOCKET_LOCK_DESTROY(so);
1340 RTMemFree(so);
1341 so_next = son;
1342 if (sop != NULL)
1343 SOCKET_UNLOCK(sop);
1344 CONTINUE_NO_UNLOCK(udp);
1345 }
1346#endif
1347 POLL_UDP_EVENTS(rc, error, so, &NetworkEvents);
1348
1349 LOG_NAT_SOCK(so, UDP, &NetworkEvents, readfds, writefds, xfds);
1350
1351 if (so->s != -1 && CHECK_FD_SET(so, NetworkEvents, readfds))
1352 {
1353 SORECVFROM(pData, so);
1354 }
1355 LOOP_LABEL(udp, so, so_next);
1356 }
1357
1358done:
1359
1360 STAM_PROFILE_STOP(&pData->StatPoll, a);
1361}
1362
1363
1364struct arphdr
1365{
1366 unsigned short ar_hrd; /* format of hardware address */
1367 unsigned short ar_pro; /* format of protocol address */
1368 unsigned char ar_hln; /* length of hardware address */
1369 unsigned char ar_pln; /* length of protocol address */
1370 unsigned short ar_op; /* ARP opcode (command) */
1371
1372 /*
1373 * Ethernet looks like this; this part is variable-sized, however...
1374 */
1375 unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
1376 unsigned char ar_sip[4]; /* sender IP address */
1377 unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
1378 unsigned char ar_tip[4]; /* target IP address */
1379};
1380AssertCompileSize(struct arphdr, 28);
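/*
 * Added note: for Ethernet/IPv4 this fixed layout is 8 bytes of header fields
 * (ar_hrd, ar_pro, ar_hln, ar_pln, ar_op) followed by 6 + 4 + 6 + 4 bytes of
 * sender/target hardware and protocol addresses, i.e. the 28 bytes checked by
 * the AssertCompileSize above.
 */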
1381
1382static void arp_input(PNATState pData, struct mbuf *m)
1383{
1384 struct ethhdr *eh;
1385 struct ethhdr *reh;
1386 struct arphdr *ah;
1387 struct arphdr *rah;
1388 int ar_op;
1389 uint32_t htip;
1390 uint32_t tip;
1391 struct mbuf *mr;
1392 eh = mtod(m, struct ethhdr *);
1393 ah = (struct arphdr *)&eh[1];
1394 htip = RT_N2H_U32(*(uint32_t*)ah->ar_tip);
1395 tip = *(uint32_t*)ah->ar_tip;
1396
1397 ar_op = RT_N2H_U16(ah->ar_op);
1398
1399 switch (ar_op)
1400 {
1401 case ARPOP_REQUEST:
1402 mr = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1403 if (mr == NULL)
1404 break;
1405 reh = mtod(mr, struct ethhdr *);
1406 mr->m_data += ETH_HLEN;
1407 rah = mtod(mr, struct arphdr *);
1408 mr->m_len = sizeof(struct arphdr);
1409 Assert(mr);
1410 memcpy(reh->h_source, eh->h_source, ETH_ALEN); /* XXX: if_encap will swap src and dst*/
1411#ifdef VBOX_WITH_NAT_SERVICE
1412 if (tip == pData->special_addr.s_addr)
1413 goto arp_ok;
1414#endif
1415 if ((htip & pData->netmask) == RT_N2H_U32(pData->special_addr.s_addr))
1416 {
1417 if ( CTL_CHECK(htip, CTL_DNS)
1418 || CTL_CHECK(htip, CTL_ALIAS)
1419 || CTL_CHECK(htip, CTL_TFTP))
1420 goto arp_ok;
1421 m_freem(pData, mr);
1422 break;
1423
1424 arp_ok:
1425 rah->ar_hrd = RT_H2N_U16_C(1);
1426 rah->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1427 rah->ar_hln = ETH_ALEN;
1428 rah->ar_pln = 4;
1429 rah->ar_op = RT_H2N_U16_C(ARPOP_REPLY);
1430 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN);
1431
1432 switch (htip & ~pData->netmask)
1433 {
1434 case CTL_DNS:
1435 case CTL_ALIAS:
1436 rah->ar_sha[5] = (uint8_t)(htip & ~pData->netmask);
1437 break;
1438 default:;
1439 }
1440
1441 memcpy(rah->ar_sip, ah->ar_tip, 4);
1442 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
1443 memcpy(rah->ar_tip, ah->ar_sip, 4);
1444 if_encap(pData, ETH_P_ARP, mr, ETH_ENCAP_URG);
1445 }
1446 else
1447 m_freem(pData, mr);
1448
1449 /* Gratuitous ARP */
1450 if ( *(uint32_t *)ah->ar_sip == *(uint32_t *)ah->ar_tip
1451 && memcmp(ah->ar_tha, broadcast_ethaddr, ETH_ALEN) == 0
1452 && memcmp(eh->h_dest, broadcast_ethaddr, ETH_ALEN) == 0)
1453 {
1454 /* we've received an announcement about an address assignment,
1455 * so let's update the ARP cache
1456 */
1457 slirp_arp_cache_update_or_add(pData, *(uint32_t *)ah->ar_tip, &eh->h_dest[0]);
1458 }
1459 break;
1460
1461 case ARPOP_REPLY:
1462 slirp_arp_cache_update_or_add(pData, *(uint32_t *)ah->ar_sip, &ah->ar_sha[0]);
1463 break;
1464
1465 default:
1466 break;
1467 }
1468
1469 m_freem(pData, m);
1470}
1471
1472/**
1473 * Feed a packet into the slirp engine.
1474 *
1475 * @param m Data buffer, m_len is not valid.
1476 * @param cbBuf The length of the data in m.
1477 */
1478void slirp_input(PNATState pData, struct mbuf *m, size_t cbBuf)
1479{
1480 int proto;
1481 static bool fWarnedIpv6;
1482 struct ethhdr *eh;
1483 uint8_t au8Ether[ETH_ALEN];
1484
1485 m->m_len = cbBuf;
1486 if (cbBuf < ETH_HLEN)
1487 {
1488 LogRel(("NAT: packet of size %d has been ignored\n", m->m_len));
1489 m_freem(pData, m);
1490 return;
1491 }
1492 eh = mtod(m, struct ethhdr *);
1493 proto = RT_N2H_U16(eh->h_proto);
1494
1495 memcpy(au8Ether, eh->h_source, ETH_ALEN);
1496
1497 switch(proto)
1498 {
1499 case ETH_P_ARP:
1500 arp_input(pData, m);
1501 break;
1502
1503 case ETH_P_IP:
1504 /* Update time. Important if the network is very quiet, as otherwise
1505 * the first outgoing connection gets an incorrect timestamp. */
1506 updtime(pData);
1507 m_adj(m, ETH_HLEN);
1508 M_ASSERTPKTHDR(m);
1509 m->m_pkthdr.header = mtod(m, void *);
1510 ip_input(pData, m);
1511 break;
1512
1513 case ETH_P_IPV6:
1514 m_freem(pData, m);
1515 if (!fWarnedIpv6)
1516 {
1517 LogRel(("NAT: IPv6 not supported\n"));
1518 fWarnedIpv6 = true;
1519 }
1520 break;
1521
1522 default:
1523 Log(("NAT: Unsupported protocol %x\n", proto));
1524 m_freem(pData, m);
1525 break;
1526 }
1527
1528 if (pData->cRedirectionsActive != pData->cRedirectionsStored)
1529 activate_port_forwarding(pData, au8Ether);
1530}
1531
1532/* output the IP packet to the ethernet device */
1533void if_encap(PNATState pData, uint16_t eth_proto, struct mbuf *m, int flags)
1534{
1535 struct ethhdr *eh;
1536 uint8_t *buf = NULL;
1537 size_t mlen = 0;
1538 STAM_PROFILE_START(&pData->StatIF_encap, a);
1539
1540 M_ASSERTPKTHDR(m);
1541 m->m_data -= ETH_HLEN;
1542 m->m_len += ETH_HLEN;
1543 eh = mtod(m, struct ethhdr *);
1544
1545 if (memcmp(eh->h_source, special_ethaddr, ETH_ALEN) != 0)
1546 {
1547 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
1548 memcpy(eh->h_source, special_ethaddr, ETH_ALEN);
1549 Assert(memcmp(eh->h_dest, special_ethaddr, ETH_ALEN) != 0);
1550 if (memcmp(eh->h_dest, zerro_ethaddr, ETH_ALEN) == 0)
1551 {
1552 /* don't do anything */
1553 m_freem(pData, m);
1554 goto done;
1555 }
1556 }
1557 mlen = m_length(m, NULL);
1558 buf = RTMemAlloc(mlen);
1559 if (buf == NULL)
1560 {
1561 LogRel(("NAT: Can't alloc memory for outgoing buffer\n"));
1562 m_freem(pData, m);
1563 goto done;
1564 }
1565 eh->h_proto = RT_H2N_U16(eth_proto);
1566 m_copydata(m, 0, mlen, (char *)buf);
1567 if (flags & ETH_ENCAP_URG)
1568 slirp_urg_output(pData->pvUser, m, buf, mlen);
1569 else
1570 slirp_output(pData->pvUser, m, buf, mlen);
1571done:
1572 STAM_PROFILE_STOP(&pData->StatIF_encap, a);
1573}
1574
1575/**
1576 * We still use the DHCP server leases to map an Ethernet address to an IP address.
1577 * @todo see rt_lookup_in_cache
1578 */
1579static uint32_t find_guest_ip(PNATState pData, const uint8_t *eth_addr)
1580{
1581 uint32_t ip = INADDR_ANY;
1582 int rc;
1583
1584 if (eth_addr == NULL)
1585 return INADDR_ANY;
1586
1587 if ( memcmp(eth_addr, zerro_ethaddr, ETH_ALEN) == 0
1588 || memcmp(eth_addr, broadcast_ethaddr, ETH_ALEN) == 0)
1589 return INADDR_ANY;
1590
1591 rc = slirp_arp_lookup_ip_by_ether(pData, eth_addr, &ip);
1592 if (RT_SUCCESS(rc))
1593 return ip;
1594
1595 bootp_cache_lookup_ip_by_ether(pData, eth_addr, &ip);
1596 /* ignore return code, ip will be set to INADDR_ANY on error */
1597 return ip;
1598}
1599
1600/**
1601 * We need to check whether we've already activated port forwarding
1602 * for a specific machine ... that of course only matters in
1603 * service mode.
1604 * @todo finish this for service case
1605 */
1606static void activate_port_forwarding(PNATState pData, const uint8_t *h_source)
1607{
1608 struct port_forward_rule *rule, *tmp;
1609
1610 /* check mac here */
1611 LIST_FOREACH_SAFE(rule, &pData->port_forward_rule_head, list, tmp)
1612 {
1613 struct socket *so;
1614 struct alias_link *alias_link;
1615 struct libalias *lib;
1616 int flags;
1617 struct sockaddr sa;
1618 struct sockaddr_in *psin;
1619 socklen_t socketlen;
1620 struct in_addr alias;
1621 int rc;
1622 uint32_t guest_addr; /* need to check whether we have already given an address to the guest */
1623
1624 if (rule->activated)
1625 continue;
1626
1627#ifdef VBOX_WITH_NAT_SERVICE
1628 if (memcmp(rule->mac_address, h_source, ETH_ALEN) != 0)
1629 continue; /* not the right MAC; @todo it'd be better to keep the port-forwarding list per MAC */
1630 guest_addr = find_guest_ip(pData, h_source);
1631#else
1632#if 0
1633 if (memcmp(client_ethaddr, h_source, ETH_ALEN) != 0)
1634 continue;
1635#endif
1636 guest_addr = find_guest_ip(pData, h_source);
1637#endif
1638 if (guest_addr == INADDR_ANY)
1639 {
1640 /* the address wasn't granted */
1641 return;
1642 }
1643
1644#if !defined(VBOX_WITH_NAT_SERVICE)
1645 if (rule->guest_addr.s_addr != guest_addr)
1646 continue;
1647#endif
1648
1649 LogRel(("NAT: set redirect %s host port %d => guest port %d @ %R[IP4]\n",
1650 (rule->proto == IPPROTO_UDP?"UDP":"TCP"),
1651 rule->host_port, rule->guest_port, &guest_addr));
1652
1653 if (rule->proto == IPPROTO_UDP)
1654 so = udp_listen(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1655 RT_H2N_U16(rule->guest_port), 0);
1656 else
1657 so = solisten(pData, rule->bind_ip.s_addr, RT_H2N_U16(rule->host_port), guest_addr,
1658 RT_H2N_U16(rule->guest_port), 0);
1659
1660 if (so == NULL)
1661 goto remove_port_forwarding;
1662
1663 psin = (struct sockaddr_in *)&sa;
1664 psin->sin_family = AF_INET;
1665 psin->sin_port = 0;
1666 psin->sin_addr.s_addr = INADDR_ANY;
1667 socketlen = sizeof(struct sockaddr);
1668
1669 rc = getsockname(so->s, &sa, &socketlen);
1670 if (rc < 0 || sa.sa_family != AF_INET)
1671 goto remove_port_forwarding;
1672
1673 psin = (struct sockaddr_in *)&sa;
1674
1675 lib = LibAliasInit(pData, NULL);
1676 flags = LibAliasSetMode(lib, 0, 0);
1677 flags |= pData->i32AliasMode;
1678 flags |= PKT_ALIAS_REVERSE; /* set reverse */
1679 flags = LibAliasSetMode(lib, flags, ~0);
1680
1681 alias.s_addr = RT_H2N_U32(RT_N2H_U32(guest_addr) | CTL_ALIAS);
1682 alias_link = LibAliasRedirectPort(lib, psin->sin_addr, RT_H2N_U16(rule->host_port),
1683 alias, RT_H2N_U16(rule->guest_port),
1684 pData->special_addr, -1, /* not very clear for now */
1685 rule->proto);
1686 if (!alias_link)
1687 goto remove_port_forwarding;
1688
1689 so->so_la = lib;
1690 rule->activated = 1;
1691 pData->cRedirectionsActive++;
1692 continue;
1693
1694 remove_port_forwarding:
1695 LogRel(("NAT: failed to redirect %s %d => %d\n",
1696 (rule->proto == IPPROTO_UDP?"UDP":"TCP"), rule->host_port, rule->guest_port));
1697 LIST_REMOVE(rule, list);
1698 pData->cRedirectionsStored--;
1699 RTMemFree(rule);
1700 }
1701}
1702
1703/**
1704 * Since 3.1, instead of opening a new socket right away, we first
1705 * gather more information:
1706 * 1. bind IP
1707 * 2. host port
1708 * 3. guest port
1709 * 4. proto
1710 * 5. guest MAC address
1711 * The guest's MAC address is rather important for the service case, but we can
1712 * easily get it from the VM configuration in DrvNAT or the service; the idea is
1713 * to activate the corresponding port-forwarding rule.
1714 */
1715int slirp_redir(PNATState pData, int is_udp, struct in_addr host_addr, int host_port,
1716 struct in_addr guest_addr, int guest_port, const uint8_t *ethaddr)
1717{
1718 struct port_forward_rule *rule = NULL;
1719 Assert(memcmp(ethaddr, zerro_ethaddr, ETH_ALEN) == 0);
1720
1721 rule = RTMemAllocZ(sizeof(struct port_forward_rule));
1722 if (rule == NULL)
1723 return 1;
1724
1725 rule->proto = (is_udp ? IPPROTO_UDP : IPPROTO_TCP);
1726 rule->host_port = host_port;
1727 rule->guest_port = guest_port;
1728#ifndef VBOX_WITH_NAT_SERVICE
1729 rule->guest_addr.s_addr = guest_addr.s_addr;
1730#endif
1731 rule->bind_ip.s_addr = host_addr.s_addr;
1732 memcpy(rule->mac_address, ethaddr, ETH_ALEN);
1733 /* @todo add mac address */
1734 LIST_INSERT_HEAD(&pData->port_forward_rule_head, rule, list);
1735 pData->cRedirectionsStored++;
1736 return 0;
1737}
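/*
 * Added usage sketch (port numbers and variables are illustrative only): to
 * forward host TCP port 2222 to guest port 22, a rule is registered first and
 * then armed by activate_port_forwarding() once the guest address is known:
 *
 *     struct in_addr hostAddr, guestAddr;
 *     hostAddr.s_addr = INADDR_ANY;      // bind on all host interfaces
 *     guestAddr.s_addr = guestIp;        // or INADDR_ANY until DHCP/ARP resolves it
 *     slirp_redir(pData, 0, hostAddr, 2222, guestAddr, 22, ethaddr);   // 0 => TCP
 *
 * Note that ethaddr is expected to be the all-zero MAC at this point, as the
 * Assert at the top of slirp_redir() indicates.
 */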
1738
1739void slirp_set_ethaddr_and_activate_port_forwarding(PNATState pData, const uint8_t *ethaddr, uint32_t GuestIP)
1740{
1741#ifndef VBOX_WITH_NAT_SERVICE
1742 memcpy(client_ethaddr, ethaddr, ETH_ALEN);
1743#endif
1744 if (GuestIP != INADDR_ANY)
1745 {
1746 slirp_arp_cache_update_or_add(pData, GuestIP, ethaddr);
1747 activate_port_forwarding(pData, ethaddr);
1748 }
1749}
1750
1751#if defined(RT_OS_WINDOWS)
1752HANDLE *slirp_get_events(PNATState pData)
1753{
1754 return pData->phEvents;
1755}
1756void slirp_register_external_event(PNATState pData, HANDLE hEvent, int index)
1757{
1758 pData->phEvents[index] = hEvent;
1759}
1760#endif
1761
1762unsigned int slirp_get_timeout_ms(PNATState pData)
1763{
1764 if (link_up)
1765 {
1766 if (time_fasttimo)
1767 return 2;
1768 if (do_slowtimo)
1769 return 500; /* see PR_SLOWHZ */
1770 }
1771 return 3600*1000; /* one hour */
1772}
1773
1774#ifndef RT_OS_WINDOWS
1775int slirp_get_nsock(PNATState pData)
1776{
1777 return pData->nsock;
1778}
1779#endif
1780
1781/*
1782 * This function is called from the NAT thread.
1783 */
1784void slirp_post_sent(PNATState pData, void *pvArg)
1785{
1786 struct socket *so = 0;
1787 struct tcpcb *tp = 0;
1788 struct mbuf *m = (struct mbuf *)pvArg;
1789 m_freem(pData, m);
1790}
1791#ifdef VBOX_WITH_SLIRP_MT
1792void slirp_process_queue(PNATState pData)
1793{
1794 RTReqProcess(pData->pReqQueue, RT_INDEFINITE_WAIT);
1795}
1796void *slirp_get_queue(PNATState pData)
1797{
1798 return pData->pReqQueue;
1799}
1800#endif
1801
1802void slirp_set_dhcp_TFTP_prefix(PNATState pData, const char *tftpPrefix)
1803{
1804 Log2(("tftp_prefix:%s\n", tftpPrefix));
1805 tftp_prefix = tftpPrefix;
1806}
1807
1808void slirp_set_dhcp_TFTP_bootfile(PNATState pData, const char *bootFile)
1809{
1810 Log2(("bootFile:%s\n", bootFile));
1811 bootp_filename = bootFile;
1812}
1813
1814void slirp_set_dhcp_next_server(PNATState pData, const char *next_server)
1815{
1816 Log2(("next_server:%s\n", next_server));
1817 if (next_server == NULL)
1818 pData->tftp_server.s_addr = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_TFTP);
1819 else
1820 inet_aton(next_server, &pData->tftp_server);
1821}
1822
1823int slirp_set_binding_address(PNATState pData, char *addr)
1824{
1825 if (addr == NULL || (inet_aton(addr, &pData->bindIP) == 0))
1826 {
1827 pData->bindIP.s_addr = INADDR_ANY;
1828 return 1;
1829 }
1830 return 0;
1831}
1832
1833void slirp_set_dhcp_dns_proxy(PNATState pData, bool fDNSProxy)
1834{
1835 if (!pData->fUseHostResolver)
1836 {
1837 Log2(("NAT: DNS proxy switched %s\n", (fDNSProxy ? "on" : "off")));
1838 pData->fUseDnsProxy = fDNSProxy;
1839 }
1840 else
1841 LogRel(("NAT: host resolver conflicts with the DNS proxy; the latter has been forcibly ignored\n"));
1842}
1843
1844#define CHECK_ARG(name, val, lim_min, lim_max) \
1845 do { \
1846 if ((val) < (lim_min) || (val) > (lim_max)) \
1847 { \
1848 LogRel(("NAT: (" #name ":%d) has been ignored, " \
1849 "because out of range (%d, %d)\n", (val), (lim_min), (lim_max))); \
1850 return; \
1851 } \
1852 else \
1853 LogRel(("NAT: (" #name ":%d)\n", (val))); \
1854 } while (0)
1855
1856/* don't allow the user to set values below 8 KiB or above 1 MiB */
1857#define _8K_1M_CHECK_ARG(name, val) CHECK_ARG(name, (val), 8, 1024)
1858void slirp_set_rcvbuf(PNATState pData, int kilobytes)
1859{
1860 _8K_1M_CHECK_ARG("SOCKET_RCVBUF", kilobytes);
1861 pData->socket_rcv = kilobytes;
1862}
1863void slirp_set_sndbuf(PNATState pData, int kilobytes)
1864{
1865 _8K_1M_CHECK_ARG("SOCKET_SNDBUF", kilobytes);
1866 pData->socket_snd = kilobytes * _1K;
1867}
1868void slirp_set_tcp_rcvspace(PNATState pData, int kilobytes)
1869{
1870 _8K_1M_CHECK_ARG("TCP_RCVSPACE", kilobytes);
1871 tcp_rcvspace = kilobytes * _1K;
1872}
1873void slirp_set_tcp_sndspace(PNATState pData, int kilobytes)
1874{
1875 _8K_1M_CHECK_ARG("TCP_SNDSPACE", kilobytes);
1876 tcp_sndspace = kilobytes * _1K;
1877}
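/*
 * Added usage sketch (values are illustrative): these setters take sizes in
 * kilobytes and are validated by _8K_1M_CHECK_ARG against the 8 KiB .. 1 MiB
 * range; out-of-range values are logged and the call is ignored. E.g.:
 *
 *     slirp_set_rcvbuf(pData, 64);         // host socket receive buffer
 *     slirp_set_sndbuf(pData, 64);         // host socket send buffer
 *     slirp_set_tcp_rcvspace(pData, 64);   // TCP receive buffer space (tcp_rcvspace)
 *     slirp_set_tcp_sndspace(pData, 64);   // TCP send buffer space (tcp_sndspace)
 */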
1878
1879/*
1880 * Looks up the Ethernet address for an IP in the ARP cache.
1881 * Note: it is the caller's responsibility to allocate the buffer for the result.
1882 * @returns IPRT status code
1883 */
1884int slirp_arp_lookup_ether_by_ip(PNATState pData, uint32_t ip, uint8_t *ether)
1885{
1886 struct arp_cache_entry *ac;
1887
1888 if (ether == NULL)
1889 return VERR_INVALID_PARAMETER;
1890
1891 if (LIST_EMPTY(&pData->arp_cache))
1892 return VERR_NOT_FOUND;
1893
1894 LIST_FOREACH(ac, &pData->arp_cache, list)
1895 {
1896 if (ac->ip == ip)
1897 {
1898 memcpy(ether, ac->ether, ETH_ALEN);
1899 return VINF_SUCCESS;
1900 }
1901 }
1902 return VERR_NOT_FOUND;
1903}
1904
1905/*
1906 * Looks up the IP for an Ethernet address in the ARP cache.
1907 * Note: it is the caller's responsibility to allocate the buffer for the result.
1908 * @returns IPRT status code (VINF_SUCCESS if found, VERR_NOT_FOUND otherwise)
1909 */
1910int slirp_arp_lookup_ip_by_ether(PNATState pData, const uint8_t *ether, uint32_t *ip)
1911{
1912 struct arp_cache_entry *ac;
1913 *ip = INADDR_ANY;
1914
1915 if (LIST_EMPTY(&pData->arp_cache))
1916 return VERR_NOT_FOUND;
1917
1918 LIST_FOREACH(ac, &pData->arp_cache, list)
1919 {
1920 if (memcmp(ether, ac->ether, ETH_ALEN) == 0)
1921 {
1922 *ip = ac->ip;
1923 return VINF_SUCCESS;
1924 }
1925 }
1926 return VERR_NOT_FOUND;
1927}
1928
1929void slirp_arp_who_has(PNATState pData, uint32_t dst)
1930{
1931 struct mbuf *m;
1932 struct ethhdr *ehdr;
1933 struct arphdr *ahdr;
1934
1935 m = m_getcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR);
1936 if (m == NULL)
1937 {
1938 LogRel(("NAT: Can't alloc mbuf for ARP request\n"));
1939 return;
1940 }
1941 ehdr = mtod(m, struct ethhdr *);
1942 memset(ehdr->h_source, 0xff, ETH_ALEN);
1943 ahdr = (struct arphdr *)&ehdr[1];
1944 ahdr->ar_hrd = RT_H2N_U16_C(1);
1945 ahdr->ar_pro = RT_H2N_U16_C(ETH_P_IP);
1946 ahdr->ar_hln = ETH_ALEN;
1947 ahdr->ar_pln = 4;
1948 ahdr->ar_op = RT_H2N_U16_C(ARPOP_REQUEST);
1949 memcpy(ahdr->ar_sha, special_ethaddr, ETH_ALEN);
1950 *(uint32_t *)ahdr->ar_sip = RT_H2N_U32(RT_N2H_U32(pData->special_addr.s_addr) | CTL_ALIAS);
1951 memset(ahdr->ar_tha, 0xff, ETH_ALEN); /*broadcast*/
1952 *(uint32_t *)ahdr->ar_tip = dst;
1953 /* warning: this must fit within the minimal mbuf size */
1954 m->m_len = sizeof(struct arphdr) + ETH_HLEN;
1955 m->m_data += ETH_HLEN;
1956 m->m_len -= ETH_HLEN;
1957 if_encap(pData, ETH_P_ARP, m, ETH_ENCAP_URG);
1958}
1959
1960int slirp_arp_cache_update_or_add(PNATState pData, uint32_t dst, const uint8_t *mac)
1961{
1962 if (slirp_arp_cache_update(pData, dst, mac))
1963 slirp_arp_cache_add(pData, dst, mac);
1964
1965 return 0;
1966}
1967
1968/* Updates the ARP cache.
1969 * @returns 0 - if the entry was found and updated,
1970 * 1 - if it wasn't found.
1971 */
1972int slirp_arp_cache_update(PNATState pData, uint32_t dst, const uint8_t *mac)
1973{
1974 struct arp_cache_entry *ac;
1975 LIST_FOREACH(ac, &pData->arp_cache, list)
1976 {
1977 if (memcmp(ac->ether, mac, ETH_ALEN) == 0)
1978 {
1979 ac->ip = dst;
1980 return 0;
1981 }
1982 }
1983 return 1;
1984}
1985
1986void slirp_arp_cache_add(PNATState pData, uint32_t ip, const uint8_t *ether)
1987{
1988 struct arp_cache_entry *ac = NULL;
1989 ac = RTMemAllocZ(sizeof(struct arp_cache_entry));
1990 if (ac == NULL)
1991 {
1992 LogRel(("NAT: Can't allocate arp cache entry\n"));
1993 return;
1994 }
1995 ac->ip = ip;
1996 memcpy(ac->ether, ether, ETH_ALEN);
1997 LIST_INSERT_HEAD(&pData->arp_cache, ac, list);
1998}
1999
2000void slirp_set_mtu(PNATState pData, int mtu)
2001{
2002 if (mtu < 20 || mtu >= 16000)
2003 {
2004 LogRel(("NAT: MTU (%d) is out of range [20, 16000); forcibly setting it to 1500\n", mtu));
2005 mtu = 1500;
2006 }
2007 if_mtu =
2008 if_mru = mtu;
2009}