VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DrvNAT.cpp@ 28213

Last change on this file since 28213 was 28213, checked in by vboxsync, 15 years ago

PDMINETWORKUP,Drv*,Dev*: Eliminated pfnSendDeprecated.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 44.7 KB
Line 
1/* $Id: DrvNAT.cpp 28213 2010-04-12 15:15:51Z vboxsync $ */
2/** @file
3 * DrvNAT - NAT network transport driver.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_DRV_NAT
27#define __STDC_LIMIT_MACROS
28#define __STDC_CONSTANT_MACROS
29#include "slirp/libslirp.h"
30#include "slirp/ctl.h"
31#include <VBox/pdmdrv.h>
32#include <VBox/pdmnetifs.h>
33#include <VBox/pdmnetinline.h>
34#include <iprt/assert.h>
35#include <iprt/file.h>
36#include <iprt/mem.h>
37#include <iprt/string.h>
38#include <iprt/critsect.h>
39#include <iprt/cidr.h>
40#include <iprt/stream.h>
41#include <iprt/uuid.h>
42
43#include "Builtins.h"
44
45#ifndef RT_OS_WINDOWS
46# include <unistd.h>
47# include <fcntl.h>
48# include <poll.h>
49# include <errno.h>
50#endif
51#ifdef RT_OS_FREEBSD
52# include <netinet/in.h>
53#endif
54#include <iprt/semaphore.h>
55#include <iprt/req.h>
56
57#define COUNTERS_INIT
58#include "counters.h"
59
60
61/*******************************************************************************
62* Defined Constants And Macros *
63*******************************************************************************/
64
65/**
66 * @todo: This is a bad hack to prevent freezing the guest during high network
67 * activity. Windows host only. This needs to be fixed properly.
68 */
69#define VBOX_NAT_DELAY_HACK
70
/** Queries an optional CFGM value of the given @a type into @a var.
 * A missing value (VERR_CFGM_VALUE_NOT_FOUND) is tolerated and leaves @a var
 * untouched; any other failure aborts driver construction by returning a VM
 * error from the enclosing function (note the hidden 'return'!). */
#define GET_EXTRADATA(pthis, node, name, rc, type, type_name, var) \
do { \
    (rc) = CFGMR3Query ## type((node), name, &(var)); \
    if (RT_FAILURE((rc)) && (rc) != VERR_CFGM_VALUE_NOT_FOUND) \
        return PDMDrvHlpVMSetError((pthis)->pDrvIns, (rc), RT_SRC_POS, N_("NAT#%d: configuration query for \""name"\" " #type_name " failed"), \
                                   (pthis)->pDrvIns->iInstance); \
} while (0)

/** Queries a mandatory CFGM value of the given @a type into @a var.
 * Any failure, including a missing value, aborts driver construction by
 * returning a VM error from the enclosing function. */
#define GET_ED_STRICT(pthis, node, name, rc, type, type_name, var) \
do { \
    (rc) = CFGMR3Query ## type((node), name, &(var)); \
    if (RT_FAILURE((rc))) \
        return PDMDrvHlpVMSetError((pthis)->pDrvIns, (rc), RT_SRC_POS, N_("NAT#%d: configuration query for \""name"\" " #type_name " failed"), \
                                   (pthis)->pDrvIns->iInstance); \
} while (0)

/** Same as GET_EXTRADATA but for sized queries (strings): passes @a var_size
 * as the buffer size to the CFGMR3Query##type API. */
#define GET_EXTRADATA_N(pthis, node, name, rc, type, type_name, var, var_size) \
do { \
    (rc) = CFGMR3Query ## type((node), name, &(var), var_size); \
    if (RT_FAILURE((rc)) && (rc) != VERR_CFGM_VALUE_NOT_FOUND) \
        return PDMDrvHlpVMSetError((pthis)->pDrvIns, (rc), RT_SRC_POS, N_("NAT#%d: configuration query for \""name"\" " #type_name " failed"), \
                                   (pthis)->pDrvIns->iInstance); \
} while (0)
94
/** Typed convenience wrappers around GET_EXTRADATA / GET_ED_STRICT.
 * Note: the type_name argument is stringized into the user-visible error
 * message, so it must be spelled correctly ("bolean" typo fixed). */
#define GET_BOOL(rc, pthis, node, name, var) \
    GET_EXTRADATA(pthis, node, name, (rc), Bool, boolean, (var))
#define GET_STRING(rc, pthis, node, name, var, var_size) \
    GET_EXTRADATA_N(pthis, node, name, (rc), String, string, (var), (var_size))
#define GET_STRING_ALLOC(rc, pthis, node, name, var) \
    GET_EXTRADATA(pthis, node, name, (rc), StringAlloc, string, (var))
#define GET_S32(rc, pthis, node, name, var) \
    GET_EXTRADATA(pthis, node, name, (rc), S32, int, (var))
#define GET_S32_STRICT(rc, pthis, node, name, var) \
    GET_ED_STRICT(pthis, node, name, (rc), S32, int, (var))
105
106
107
/** Reads the CFGM string value named #x into a stack buffer and parses it as
 * a dotted-quad IPv4 address into the in_addr variable @a x via inet_aton().
 * On success @a status is set to inet_aton()'s return value (non-zero = OK);
 * a missing value leaves @a status at its prior value. */
#define DO_GET_IP(rc, node, instance, status, x) \
do { \
    char sz##x[32]; \
    GET_STRING((rc), (node), (instance), #x, sz ## x[0], sizeof(sz ## x)); \
    if (rc != VERR_CFGM_VALUE_NOT_FOUND) \
        (status) = inet_aton(sz ## x, &x); \
} while (0)

/** Like DO_GET_IP but falls back to the network-order default @a def when the
 * value is absent or fails to parse. */
#define GETIP_DEF(rc, node, instance, x, def) \
do \
{ \
    int status = 0; \
    DO_GET_IP((rc), (node), (instance), status, x); \
    if (status == 0 || rc == VERR_CFGM_VALUE_NOT_FOUND) \
        x.s_addr = def; \
} while (0)
124
125/*******************************************************************************
126* Structures and Typedefs *
127*******************************************************************************/
/**
 * NAT network transport driver instance data.
 *
 * @implements PDMINETWORKUP
 */
typedef struct DRVNAT
{
    /** The network interface (our upward-facing side). */
    PDMINETWORKUP           INetworkUp;
    /** The port we're attached to. */
    PPDMINETWORKDOWN        pIAboveNet;
    /** The network config of the port we're attached to. */
    PPDMINETWORKCONFIG      pIAboveConfig;
    /** Pointer to the driver instance. */
    PPDMDRVINS              pDrvIns;
    /** Link state. */
    PDMNETWORKLINKSTATE     enmLinkState;
    /** NAT state for this instance. */
    PNATState               pNATState;
    /** TFTP directory prefix. */
    char                   *pszTFTPPrefix;
    /** Boot file name to provide in the DHCP server response. */
    char                   *pszBootFile;
    /** tftp server name to provide in the DHCP server response. */
    char                   *pszNextServer;
    /** Polling thread running the single-threaded slirp engine. */
    PPDMTHREAD              pSlirpThread;
    /** Queue for NAT-thread-external events. */
    PRTREQQUEUE             pSlirpReqQueue;
    /** The guest IP for port-forwarding. */
    uint32_t                GuestIP;
    /** Explicit structure padding. */
    uint32_t                alignment1;

#ifdef VBOX_WITH_SLIRP_MT
    /** Thread draining the guest-side slirp request queue (MT slirp only). */
    PPDMTHREAD              pGuestThread;
#endif
#ifndef RT_OS_WINDOWS
    /** The write end of the control pipe. */
    RTFILE                  PipeWrite;
    /** The read end of the control pipe. */
    RTFILE                  PipeRead;
# if HC_ARCH_BITS == 32
    /** Alignment padding. */
    //uint32_t                alignment2;
# endif
#else
    /** for external notification */
    HANDLE                  hWakeupEvent;
#endif

/* Expands the statistics counter list from counters.h into Stat* members. */
#define DRV_PROFILE_COUNTER(name, dsc)     STAMPROFILE Stat ## name
#define DRV_COUNTING_COUNTER(name, dsc)    STAMCOUNTER Stat ## name
#include "counters.h"
    /** thread delivering packets for receiving by the guest */
    PPDMTHREAD              pRecvThread;
    /** thread delivering urg packets for receiving by the guest */
    PPDMTHREAD              pUrgRecvThread;
    /** event to wakeup the guest receive thread */
    RTSEMEVENT              EventRecv;
    /** event to wakeup the guest urgent receive thread */
    RTSEMEVENT              EventUrgRecv;
    /** Receive Req queue (deliver packets to the guest) */
    PRTREQQUEUE             pRecvReqQueue;
    /** Receive Urgent Req queue (deliver packets to the guest) */
    PRTREQQUEUE             pUrgRecvReqQueue;

    /** Makes access to device functions RecvAvail and Recv atomic
     * (serializes the normal and urgent receive worker threads). */
    RTCRITSECT              csDevAccess;
    /** Number of urgent packets queued for delivery to the guest. */
    volatile uint32_t       cUrgPkt;
    /** Number of normal packets queued for delivery to the guest. */
    volatile uint32_t       cPkt;
} DRVNAT;
AssertCompileMemberAlignment(DRVNAT, StatNATRecvWakeups, 8);
/** Pointer to the NAT driver instance data. */
typedef DRVNAT *PDRVNAT;
202
/**
 * NAT queue item.
 */
typedef struct DRVNATQUEUITEM
{
    /** The core part owned by the queue manager. */
    PDMQUEUEITEMCORE    Core;
    /** The buffer for output to guest. */
    const uint8_t      *pu8Buf;
    /** Size of the buffer pointed to by pu8Buf (bytes). */
    size_t              cb;
    /** Associated slirp mbuf; freed when the item is processed.
     * NOTE(review): declared void* rather than struct mbuf*, presumably to
     * avoid pulling slirp internals into this header scope. */
    void               *mbuf;
} DRVNATQUEUITEM;
/** Pointer to a NAT queue item. */
typedef DRVNATQUEUITEM *PDRVNATQUEUITEM;
218
219
220/*******************************************************************************
221* Internal Functions *
222*******************************************************************************/
223static void drvNATNotifyNATThread(PDRVNAT pThis, const char *pszWho);
224
225
226
227static DECLCALLBACK(int) drvNATRecv(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
228{
229 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
230
231 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
232 return VINF_SUCCESS;
233
234 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
235 {
236 RTReqProcess(pThis->pRecvReqQueue, 0);
237 if (ASMAtomicReadU32(&pThis->cPkt) == 0)
238 RTSemEventWait(pThis->EventRecv, RT_INDEFINITE_WAIT);
239 }
240 return VINF_SUCCESS;
241}
242
243
244static DECLCALLBACK(int) drvNATRecvWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
245{
246 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
247 int rc;
248 rc = RTSemEventSignal(pThis->EventRecv);
249
250 STAM_COUNTER_INC(&pThis->StatNATRecvWakeups);
251 return VINF_SUCCESS;
252}
253
254static DECLCALLBACK(int) drvNATUrgRecv(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
255{
256 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
257
258 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
259 return VINF_SUCCESS;
260
261 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
262 {
263 RTReqProcess(pThis->pUrgRecvReqQueue, 0);
264 if (ASMAtomicReadU32(&pThis->cUrgPkt) == 0)
265 {
266 int rc = RTSemEventWait(pThis->EventUrgRecv, RT_INDEFINITE_WAIT);
267 AssertRC(rc);
268 }
269 }
270 return VINF_SUCCESS;
271}
272
273static DECLCALLBACK(int) drvNATUrgRecvWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
274{
275 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
276 int rc = RTSemEventSignal(pThis->EventUrgRecv);
277 AssertRC(rc);
278
279 return VINF_SUCCESS;
280}
281
282static DECLCALLBACK(void) drvNATUrgRecvWorker(PDRVNAT pThis, uint8_t *pu8Buf, int cb, struct mbuf *m)
283{
284 int rc = RTCritSectEnter(&pThis->csDevAccess);
285 AssertRC(rc);
286 rc = pThis->pIAboveNet->pfnWaitReceiveAvail(pThis->pIAboveNet, RT_INDEFINITE_WAIT);
287 if (RT_SUCCESS(rc))
288 {
289 rc = pThis->pIAboveNet->pfnReceive(pThis->pIAboveNet, pu8Buf, cb);
290 AssertRC(rc);
291 }
292 else if ( RT_FAILURE(rc)
293 && ( rc == VERR_TIMEOUT
294 && rc == VERR_INTERRUPTED))
295 {
296 AssertRC(rc);
297 }
298
299 rc = RTCritSectLeave(&pThis->csDevAccess);
300 AssertRC(rc);
301
302 slirp_ext_m_free(pThis->pNATState, m);
303#ifdef VBOX_WITH_SLIRP_BSD_MBUF
304 RTMemFree(pu8Buf);
305#endif
306 if (ASMAtomicDecU32(&pThis->cUrgPkt) == 0)
307 {
308 drvNATRecvWakeup(pThis->pDrvIns, pThis->pRecvThread);
309 drvNATNotifyNATThread(pThis, "drvNATUrgRecvWorker");
310 }
311}
312
313
/**
 * Worker delivering one normal packet from slirp to the guest NIC.
 *
 * Urgent packets take precedence: this worker first waits until cUrgPkt drops
 * to zero, then delivers under csDevAccess (serialized against the urgent
 * worker). The mbuf (and, with BSD mbufs, the copy buffer) is always freed,
 * and the NAT thread is kicked so slirp can make progress.
 *
 * @param   pThis       Pointer to the NAT instance.
 * @param   pu8Buf      The packet data (freed here when BSD mbufs are used).
 * @param   cb          Size of the packet in bytes.
 * @param   m           The slirp mbuf to release after delivery.
 * @thread  RECV
 */
static DECLCALLBACK(void) drvNATRecvWorker(PDRVNAT pThis, uint8_t *pu8Buf, int cb, struct mbuf *m)
{
    int rc;
    STAM_PROFILE_START(&pThis->StatNATRecv, a);

    STAM_PROFILE_START(&pThis->StatNATRecvWait, b);

    /* Let all pending urgent packets drain first; the urgent worker signals
       EventRecv when the last one is gone. */
    while (ASMAtomicReadU32(&pThis->cUrgPkt) != 0)
    {
        rc = RTSemEventWait(pThis->EventRecv, RT_INDEFINITE_WAIT);
        if (   RT_FAILURE(rc)
            && (   rc == VERR_TIMEOUT
                || rc == VERR_INTERRUPTED))
            goto done_unlocked;
    }

    rc = RTCritSectEnter(&pThis->csDevAccess);
    AssertRC(rc);

    rc = pThis->pIAboveNet->pfnWaitReceiveAvail(pThis->pIAboveNet, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        rc = pThis->pIAboveNet->pfnReceive(pThis->pIAboveNet, pu8Buf, cb);
        AssertRC(rc);
    }
    else if (   RT_FAILURE(rc)
             && rc != VERR_TIMEOUT
             && rc != VERR_INTERRUPTED)
    {
        /* Anything but a benign timeout/interruption is unexpected. */
        AssertRC(rc);
    }

    rc = RTCritSectLeave(&pThis->csDevAccess);
    AssertRC(rc);

done_unlocked:
    /* Cleanup runs on every path, including the bail-out above. */
    slirp_ext_m_free(pThis->pNATState, m);
#ifdef VBOX_WITH_SLIRP_BSD_MBUF
    RTMemFree(pu8Buf);
#endif
    ASMAtomicDecU32(&pThis->cPkt);

    drvNATNotifyNATThread(pThis, "drvNATRecvWorker");

    STAM_PROFILE_STOP(&pThis->StatNATRecvWait, b);
    STAM_PROFILE_STOP(&pThis->StatNATRecv, a);
}
361
362/**
363 * Frees a S/G buffer allocated by drvNATNetworkUp_AllocBuf.
364 *
365 * @param pThis Pointer to the NAT instance.
366 * @param pSgBuf The S/G buffer to free.
367 */
368static void drvNATFreeSgBuf(PDRVNAT pThis, PPDMSCATTERGATHER pSgBuf)
369{
370 Assert((pSgBuf->fFlags & PDMSCATTERGATHER_FLAGS_MAGIC_MASK) == PDMSCATTERGATHER_FLAGS_MAGIC);
371 pSgBuf->fFlags = 0;
372 if (pSgBuf->pvAllocator)
373 {
374 Assert(!pSgBuf->pvUser);
375 slirp_ext_m_free(pThis->pNATState, (struct mbuf *)pSgBuf->pvAllocator);
376 pSgBuf->pvAllocator = NULL;
377 }
378 else if (pSgBuf->pvUser)
379 {
380 RTMemFree(pSgBuf->aSegs[0].pvSeg);
381 pSgBuf->aSegs[0].pvSeg = NULL;
382 RTMemFree(pSgBuf->pvUser);
383 pSgBuf->pvUser = NULL;
384 }
385 RTMemFree(pSgBuf);
386}
387
/**
 * Worker function for drvNATNetworkUp_SendBuf(): feeds a guest frame into slirp.
 *
 * Normal frames hand their mbuf straight to slirp_input(). GSO frames are
 * carved into MTU-sized segments here because the NAT engine does not grok
 * large frames; each segment gets its own mbuf.
 *
 * Always consumes (frees) pSgBuf, even when the link is down.
 *
 * @param   pThis       Pointer to the NAT instance.
 * @param   pSgBuf      The scatter/gather buffer.
 * @thread  NAT
 */
static void drvNATSendWorker(PDRVNAT pThis, PPDMSCATTERGATHER pSgBuf)
{
    Assert(pThis->enmLinkState == PDMNETWORKLINKSTATE_UP);
    if (pThis->enmLinkState == PDMNETWORKLINKSTATE_UP)
    {
        struct mbuf *m = (struct mbuf *)pSgBuf->pvAllocator;
        if (m)
        {
            /*
             * A normal frame.
             */
            /* slirp_input takes ownership of the mbuf; clear our reference so
               drvNATFreeSgBuf below doesn't free it a second time. */
            pSgBuf->pvAllocator = NULL;
            slirp_input(pThis->pNATState, m, pSgBuf->cbUsed);
        }
        else
        {
            /*
             * GSO frame, need to segment it.
             */
            /** @todo Make the NAT engine grok large frames? Could be more efficient... */
#if 0 /* this is for testing PDMNetGsoCarveSegmentQD. */
            uint8_t         abHdrScratch[256];
#endif
            uint8_t const  *pbFrame = (uint8_t const *)pSgBuf->aSegs[0].pvSeg;
            PCPDMNETWORKGSO pGso    = (PCPDMNETWORKGSO)pSgBuf->pvUser;
            uint32_t const  cSegs   = PDMNetGsoCalcSegmentCount(pGso, pSgBuf->cbUsed);  Assert(cSegs > 1);
            for (size_t iSeg = 0; iSeg < cSegs; iSeg++)
            {
                size_t cbSeg;
                void  *pvSeg;
                /* One mbuf per segment: headers + at most one max payload. */
                m = slirp_ext_m_get(pThis->pNATState, pGso->cbHdrs + pGso->cbMaxSeg, &pvSeg, &cbSeg);
                if (!m)
                    break;

#if 1
                uint32_t cbPayload;
                /* Carve writes the per-segment headers into pvSeg and returns
                   the payload offset within the original frame. */
                uint32_t offPayload = PDMNetGsoCarveSegment(pGso, pbFrame, pSgBuf->cbUsed,
                                                            iSeg, cSegs, (uint8_t *)pvSeg, &cbPayload);
                memcpy((uint8_t *)pvSeg + pGso->cbHdrs, pbFrame + offPayload, cbPayload);

                slirp_input(pThis->pNATState, m, cbPayload + pGso->cbHdrs);
#else
                uint32_t cbSegFrame;
                void *pvSegFrame = PDMNetGsoCarveSegmentQD(pGso, (uint8_t *)pbFrame, pSgBuf->cbUsed, abHdrScratch,
                                                           iSeg, cSegs, &cbSegFrame);
                memcpy((uint8_t *)pvSeg, pvSegFrame, cbSegFrame);

                slirp_input(pThis->pNATState, m, cbSegFrame);
#endif
            }
        }
    }
    drvNATFreeSgBuf(pThis, pSgBuf);

    /** @todo Implement the VERR_TRY_AGAIN drvNATNetworkUp_AllocBuf sematics. */
}
451
452/**
453 * @interface_method_impl{PDMINETWORKUP,pfnAllocBuf}
454 */
455static DECLCALLBACK(int) drvNATNetworkUp_AllocBuf(PPDMINETWORKUP pInterface, size_t cbMin,
456 PCPDMNETWORKGSO pGso, PPPDMSCATTERGATHER ppSgBuf)
457{
458 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
459
460 /*
461 * Drop the incoming frame if the NAT thread isn't running.
462 */
463 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
464 {
465 Log(("drvNATNetowrkUp_AllocBuf: returns VERR_NET_NO_NETWORK\n"));
466 return VERR_NET_NO_NETWORK;
467 }
468
469 /*
470 * Allocate a scatter/gather buffer and an mbuf.
471 */
472 PPDMSCATTERGATHER pSgBuf = (PPDMSCATTERGATHER)RTMemAlloc(sizeof(*pSgBuf));
473 if (!pSgBuf)
474 return VERR_NO_MEMORY;
475 if (!pGso)
476 {
477 pSgBuf->pvUser = NULL;
478 pSgBuf->pvAllocator = slirp_ext_m_get(pThis->pNATState, cbMin,
479 &pSgBuf->aSegs[0].pvSeg, &pSgBuf->aSegs[0].cbSeg);
480 if (!pSgBuf->pvAllocator)
481 {
482 RTMemFree(pSgBuf);
483 /** @todo Implement the VERR_TRY_AGAIN semantics. */
484 return VERR_NO_MEMORY;
485 }
486 }
487 else
488 {
489 pSgBuf->pvUser = RTMemDup(pGso, sizeof(*pGso));
490 pSgBuf->pvAllocator = NULL;
491 pSgBuf->aSegs[0].cbSeg = RT_ALIGN_Z(cbMin, 16);
492 pSgBuf->aSegs[0].pvSeg = RTMemAlloc(pSgBuf->aSegs[0].cbSeg);
493 if (!pSgBuf->pvUser || !pSgBuf->aSegs[0].pvSeg)
494 {
495 RTMemFree(pSgBuf->aSegs[0].pvSeg);
496 RTMemFree(pSgBuf->pvUser);
497 RTMemFree(pSgBuf);
498 /** @todo Implement the VERR_TRY_AGAIN semantics. */
499 return VERR_NO_MEMORY;
500 }
501 }
502
503 /*
504 * Initialize the S/G buffer and return.
505 */
506 pSgBuf->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_1;
507 pSgBuf->cbUsed = 0;
508 pSgBuf->cbAvailable = pSgBuf->aSegs[0].cbSeg;
509 pSgBuf->cSegs = 1;
510
511#if 0 /* poison */
512 memset(pSgBuf->aSegs[0].pvSeg, 'F', pSgBuf->aSegs[0].cbSeg);
513#endif
514 *ppSgBuf = pSgBuf;
515 return VINF_SUCCESS;
516}
517
518/**
519 * @interface_method_impl{PDMINETWORKUP,pfnFreeBuf}
520 */
521static DECLCALLBACK(int) drvNATNetworkUp_FreeBuf(PPDMINETWORKUP pInterface, PPDMSCATTERGATHER pSgBuf)
522{
523 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
524 drvNATFreeSgBuf(pThis, pSgBuf);
525 return VINF_SUCCESS;
526}
527
/**
 * @interface_method_impl{PDMINETWORKUP,pfnSendBuf}
 *
 * Hands the frame off to the NAT thread asynchronously (drvNATSendWorker) and
 * returns immediately; on any failure the S/G buffer is freed here.
 */
static DECLCALLBACK(int) drvNATNetworkUp_SendBuf(PPDMINETWORKUP pInterface, PPDMSCATTERGATHER pSgBuf, bool fOnWorkerThread)
{
    PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
    Assert((pSgBuf->fFlags & PDMSCATTERGATHER_FLAGS_OWNER_MASK) == PDMSCATTERGATHER_FLAGS_OWNER_1);

    int rc;
    if (pThis->pSlirpThread->enmState == PDMTHREADSTATE_RUNNING)
    {
#ifdef VBOX_WITH_SLIRP_MT
        /* MT slirp owns its own queue; fetch it from the NAT state. */
        PRTREQQUEUE pQueue = (PRTREQQUEUE)slirp_get_queue(pThis->pNATState);
#else
        PRTREQQUEUE pQueue = pThis->pSlirpReqQueue;
#endif
        /* Fire and forget: the worker frees pSgBuf once slirp consumed it. */
        rc = RTReqCallEx(pQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
                         (PFNRT)drvNATSendWorker, 2, pThis, pSgBuf);
        if (RT_SUCCESS(rc))
        {
            /* Kick the NAT thread out of poll/WSAWait so it processes the request. */
            drvNATNotifyNATThread(pThis, "drvNATNetworkUp_SendBuf");
            return VINF_SUCCESS;
        }

        rc = VERR_NET_NO_BUFFER_SPACE;
    }
    else
        rc = VERR_NET_DOWN;
    drvNATFreeSgBuf(pThis, pSgBuf);
    return rc;
}
559
/**
 * Get the NAT thread out of poll/WSAWaitForMultipleEvents.
 *
 * Writes a byte into the control pipe (POSIX) or signals the wakeup event
 * (Windows) so drvNATAsyncIoThread returns from its wait and processes the
 * pending request queue(s).
 *
 * @param   pThis       Pointer to the NAT instance.
 * @param   pszWho      Caller tag, currently unused (debug aid only).
 */
static void drvNATNotifyNATThread(PDRVNAT pThis, const char *pszWho)
{
    int rc;
#ifndef RT_OS_WINDOWS
    /* kick poll() */
    rc = RTFileWrite(pThis->PipeWrite, "", 1, NULL);
#else
    /* kick WSAWaitForMultipleEvents */
    rc = WSASetEvent(pThis->hWakeupEvent);
#endif
    AssertRC(rc);
}
575
/**
 * @interface_method_impl{PDMINETWORKUP,pfnSetPromiscuousMode}
 *
 * Intentionally a no-op: the NAT engine sees all guest traffic regardless of
 * the NIC's promiscuous setting, so there is no state to update.
 */
static DECLCALLBACK(void) drvNATNetworkUp_SetPromiscuousMode(PPDMINETWORKUP pInterface, bool fPromiscuous)
{
    LogFlow(("drvNATNetworkUp_SetPromiscuousMode: fPromiscuous=%d\n", fPromiscuous));
    /* nothing to do */
}
584
585/**
586 * Worker function for drvNATNetworkUp_NotifyLinkChanged().
587 * @thread "NAT" thread.
588 */
589static void drvNATNotifyLinkChangedWorker(PDRVNAT pThis, PDMNETWORKLINKSTATE enmLinkState)
590{
591 pThis->enmLinkState = enmLinkState;
592
593 switch (enmLinkState)
594 {
595 case PDMNETWORKLINKSTATE_UP:
596 LogRel(("NAT: link up\n"));
597 slirp_link_up(pThis->pNATState);
598 break;
599
600 case PDMNETWORKLINKSTATE_DOWN:
601 case PDMNETWORKLINKSTATE_DOWN_RESUME:
602 LogRel(("NAT: link down\n"));
603 slirp_link_down(pThis->pNATState);
604 break;
605
606 default:
607 AssertMsgFailed(("drvNATNetworkUp_NotifyLinkChanged: unexpected link state %d\n", enmLinkState));
608 }
609}
610
611/**
612 * Notification on link status changes.
613 *
614 * @param pInterface Pointer to the interface structure containing the called function pointer.
615 * @param enmLinkState The new link state.
616 * @thread EMT
617 */
618static DECLCALLBACK(void) drvNATNetworkUp_NotifyLinkChanged(PPDMINETWORKUP pInterface, PDMNETWORKLINKSTATE enmLinkState)
619{
620 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
621
622 LogFlow(("drvNATNetworkUp_NotifyLinkChanged: enmLinkState=%d\n", enmLinkState));
623
624 /* don't queue new requests when the NAT thread is about to stop */
625 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
626 return;
627
628 PRTREQ pReq;
629 int rc = RTReqCallEx(pThis->pSlirpReqQueue, &pReq, 0 /*cMillies*/, RTREQFLAGS_VOID,
630 (PFNRT)drvNATNotifyLinkChangedWorker, 2, pThis, enmLinkState);
631 if (RT_LIKELY(rc == VERR_TIMEOUT))
632 {
633 drvNATNotifyNATThread(pThis, "drvNATNetworkUp_NotifyLinkChanged");
634 rc = RTReqWait(pReq, RT_INDEFINITE_WAIT);
635 AssertRC(rc);
636 }
637 else
638 AssertRC(rc);
639 RTReqFree(pReq);
640}
641
/**
 * NAT thread handling the slirp stuff.
 *
 * The slirp implementation is single-threaded so we execute this engine in a
 * dedicated thread. We take care that this thread does not become the
 * bottleneck: If the guest wants to send, a request is enqueued into the
 * pSlirpReqQueue and handled asynchronously by this thread. If this thread
 * wants to deliver packets to the guest, it enqueues a request into
 * pRecvReqQueue which is later handled by the Recv thread.
 *
 * @returns VBox status code.
 * @param   pDrvIns     The driver instance.
 * @param   pThread     The PDM thread structure.
 */
static DECLCALLBACK(int) drvNATAsyncIoThread(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
{
    PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
    int     nFDs = -1;
    int     ms;   /* NOTE(review): appears unused in both branches below. */
#ifdef RT_OS_WINDOWS
    HANDLE  *phEvents = slirp_get_events(pThis->pNATState);
    unsigned int cBreak = 0;
#else /* RT_OS_WINDOWS */
    unsigned int cPollNegRet = 0;
#endif /* !RT_OS_WINDOWS */

    LogFlow(("drvNATAsyncIoThread: pThis=%p\n", pThis));

    if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
        return VINF_SUCCESS;

    /*
     * Polling loop.
     */
    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
    {
        /*
         * To prevent concurrent execution of sending/receiving threads
         */
#ifndef RT_OS_WINDOWS
        nFDs = slirp_get_nsock(pThis->pNATState);
        /* allocation for all sockets + Management pipe */
        struct pollfd *polls = (struct pollfd *)RTMemAlloc((1 + nFDs) * sizeof(struct pollfd) + sizeof(uint32_t));
        if (polls == NULL)
            return VERR_NO_MEMORY;

        /* don't pass the managemant pipe */
        slirp_select_fill(pThis->pNATState, &nFDs, &polls[1]);

        /* polls[0] is reserved for the wakeup pipe written by
           drvNATNotifyNATThread(). */
        polls[0].fd = pThis->PipeRead;
        /* POLLRDBAND usually doesn't used on Linux but seems used on Solaris */
        polls[0].events = POLLRDNORM|POLLPRI|POLLRDBAND;
        polls[0].revents = 0;

        int cChangedFDs = poll(polls, nFDs + 1, slirp_get_timeout_ms(pThis->pNATState));
        if (cChangedFDs < 0)
        {
            if (errno == EINTR)
            {
                Log2(("NAT: signal was caught while sleep on poll\n"));
                /* No error, just process all outstanding requests but don't wait */
                cChangedFDs = 0;
            }
            else if (cPollNegRet++ > 128)
            {
                /* Rate-limited logging of persistent poll failures. */
                LogRel(("NAT:Poll returns (%s) suppressed %d\n", strerror(errno), cPollNegRet));
                cPollNegRet = 0;
            }
        }

        if (cChangedFDs >= 0)
        {
            slirp_select_poll(pThis->pNATState, &polls[1], nFDs);
            if (polls[0].revents & (POLLRDNORM|POLLPRI|POLLRDBAND))
            {
                /* drain the pipe */
                char ch[1];
                size_t cbRead;
                int counter = 0;
                /*
                 * drvNATSend decoupled so we don't know how many times
                 * device's thread sends before we've entered multiplex,
                 * so to avoid false alarm drain pipe here to the very end
                 *
                 * @todo: Probably we should counter drvNATSend to count how
                 * deep pipe has been filed before drain.
                 *
                 * XXX:Make it reading exactly we need to drain the pipe.
                 */
                /** @todo use RTPipeCreate + RTPipeRead(,biggerbuffer) here, it's
                 *        non-blocking. */
                RTFileRead(pThis->PipeRead, &ch, 1, &cbRead);
            }
        }
        /* process _all_ outstanding requests but don't wait */
        RTReqProcess(pThis->pSlirpReqQueue, 0);
        RTMemFree(polls);

#else /* RT_OS_WINDOWS */
        nFDs = -1;
        slirp_select_fill(pThis->pNATState, &nFDs);
        DWORD dwEvent = WSAWaitForMultipleEvents(nFDs, phEvents, FALSE,
                                                 slirp_get_timeout_ms(pThis->pNATState),
                                                 FALSE);
        if (   (dwEvent < WSA_WAIT_EVENT_0 || dwEvent > WSA_WAIT_EVENT_0 + nFDs - 1)
            && dwEvent != WSA_WAIT_TIMEOUT)
        {
            int error = WSAGetLastError();
            LogRel(("NAT: WSAWaitForMultipleEvents returned %d (error %d)\n", dwEvent, error));
            RTAssertPanic();
        }

        if (dwEvent == WSA_WAIT_TIMEOUT)
        {
            /* only check for slow/fast timers */
            slirp_select_poll(pThis->pNATState, /* fTimeout=*/true, /*fIcmp=*/false);
            continue;
        }
        /* poll the sockets in any case */
        Log2(("%s: poll\n", __FUNCTION__));
        slirp_select_poll(pThis->pNATState, /* fTimeout=*/false, /* fIcmp=*/(dwEvent == WSA_WAIT_EVENT_0));
        /* process _all_ outstanding requests but don't wait */
        RTReqProcess(pThis->pSlirpReqQueue, 0);
# ifdef VBOX_NAT_DELAY_HACK
        /* See VBOX_NAT_DELAY_HACK above: briefly yield every 128 iterations to
           keep the guest responsive under heavy network load on Windows. */
        if (cBreak++ > 128)
        {
            cBreak = 0;
            RTThreadSleep(2);
        }
# endif
#endif /* RT_OS_WINDOWS */
    }

    return VINF_SUCCESS;
}
773
774
775/**
776 * Unblock the send thread so it can respond to a state change.
777 *
778 * @returns VBox status code.
779 * @param pDevIns The pcnet device instance.
780 * @param pThread The send thread.
781 */
782static DECLCALLBACK(int) drvNATAsyncIoWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
783{
784 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
785
786 drvNATNotifyNATThread(pThis, "drvNATAsyncIoWakeup");
787 return VINF_SUCCESS;
788}
789
790#ifdef VBOX_WITH_SLIRP_MT
791
792static DECLCALLBACK(int) drvNATAsyncIoGuest(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
793{
794 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
795
796 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
797 return VINF_SUCCESS;
798
799 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
800 slirp_process_queue(pThis->pNATState);
801
802 return VINF_SUCCESS;
803}
804
805
806static DECLCALLBACK(int) drvNATAsyncIoGuestWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
807{
808 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
809
810 return VINF_SUCCESS;
811}
812
813#endif /* VBOX_WITH_SLIRP_MT */
814
/**
 * Function called by slirp to check if it's possible to feed incoming data to the network port.
 * @returns 1 if possible.
 * @returns 0 if not possible.
 *
 * Always reports 1 here; backpressure is handled by the receive worker
 * threads (pfnWaitReceiveAvail) instead.
 */
int slirp_can_output(void *pvUser)
{
    return 1;
}
824
825void slirp_push_recv_thread(void *pvUser)
826{
827 PDRVNAT pThis = (PDRVNAT)pvUser;
828 Assert(pThis);
829 drvNATUrgRecvWakeup(pThis->pDrvIns, pThis->pUrgRecvThread);
830}
831
832void slirp_urg_output(void *pvUser, struct mbuf *m, const uint8_t *pu8Buf, int cb)
833{
834 PDRVNAT pThis = (PDRVNAT)pvUser;
835 Assert(pThis);
836
837 PRTREQ pReq = NULL;
838
839 /* don't queue new requests when the NAT thread is about to stop */
840 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
841 return;
842
843 ASMAtomicIncU32(&pThis->cUrgPkt);
844 int rc = RTReqCallEx(pThis->pUrgRecvReqQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
845 (PFNRT)drvNATUrgRecvWorker, 4, pThis, pu8Buf, cb, m);
846 AssertRC(rc);
847 drvNATUrgRecvWakeup(pThis->pDrvIns, pThis->pUrgRecvThread);
848}
849
850/**
851 * Function called by slirp to feed incoming data to the NIC.
852 */
853void slirp_output(void *pvUser, struct mbuf *m, const uint8_t *pu8Buf, int cb)
854{
855 PDRVNAT pThis = (PDRVNAT)pvUser;
856 Assert(pThis);
857
858 LogFlow(("slirp_output BEGIN %x %d\n", pu8Buf, cb));
859 Log2(("slirp_output: pu8Buf=%p cb=%#x (pThis=%p)\n%.*Rhxd\n", pu8Buf, cb, pThis, cb, pu8Buf));
860
861 PRTREQ pReq = NULL;
862
863 /* don't queue new requests when the NAT thread is about to stop */
864 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
865 return;
866
867 ASMAtomicIncU32(&pThis->cPkt);
868 int rc = RTReqCallEx(pThis->pRecvReqQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
869 (PFNRT)drvNATRecvWorker, 4, pThis, pu8Buf, cb, m);
870 AssertRC(rc);
871 drvNATRecvWakeup(pThis->pDrvIns, pThis->pRecvThread);
872 STAM_COUNTER_INC(&pThis->StatQueuePktSent);
873}
874
875
/**
 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
 *
 * Exposes PDMIBASE and PDMINETWORKUP; each RETURN macro returns from this
 * function when pszIID matches.
 */
static DECLCALLBACK(void *) drvNATQueryInterface(PPDMIBASE pInterface, const char *pszIID)
{
    PPDMDRVINS  pDrvIns = PDMIBASE_2_PDMDRV(pInterface);
    PDRVNAT     pThis   = PDMINS_2_DATA(pDrvIns, PDRVNAT);

    PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pDrvIns->IBase);
    PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKUP, &pThis->INetworkUp);
    return NULL;
}
888
889
/**
 * Get the MAC address into the slirp stack.
 *
 * Called by drvNATLoadDone and drvNATPowerOn.
 */
static void drvNATSetMac(PDRVNAT pThis)
{
    if (pThis->pIAboveConfig)
    {
        RTMAC Mac;
        pThis->pIAboveConfig->pfnGetMac(pThis->pIAboveConfig, &Mac);
        /* Now that the MAC is known, (re-)activate the port-forwarding rules
           set up by drvNATConstructRedir for this guest. */
        slirp_set_ethaddr_and_activate_port_forwarding(pThis->pNATState, Mac.au8, pThis->GuestIP);
    }
}
905
906
907/**
908 * After loading we have to pass the MAC address of the ethernet device to the slirp stack.
909 * Otherwise the guest is not reachable until it performs a DHCP request or an ARP request
910 * (usually done during guest boot).
911 */
912static DECLCALLBACK(int) drvNATLoadDone(PPDMDRVINS pDrvIns, PSSMHANDLE pSSMHandle)
913{
914 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
915 drvNATSetMac(pThis);
916 return VINF_SUCCESS;
917}
918
919
920/**
921 * Some guests might not use DHCP to retrieve an IP but use a static IP.
922 */
923static DECLCALLBACK(void) drvNATPowerOn(PPDMDRVINS pDrvIns)
924{
925 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
926 drvNATSetMac(pThis);
927}
928
929
/**
 * Sets up the redirectors (port forwarding rules) from the configuration.
 *
 * @returns VBox status code.
 * @param   iInstance   Driver instance number (for error messages).
 * @param   pThis       Pointer to the NAT instance.
 * @param   pCfg        The configuration handle (children are the rules).
 * @param   Network     The NAT network address (host order) used to derive the
 *                      default guest IP.
 */
static int drvNATConstructRedir(unsigned iInstance, PDRVNAT pThis, PCFGMNODE pCfg, RTIPV4ADDR Network)
{
    RTMAC Mac;
    memset(&Mac, 0, sizeof(RTMAC)); /*can't get MAC here */
    /*
     * Enumerate redirections.
     */
    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pCfg); pNode; pNode = CFGMR3GetNextChild(pNode))
    {
        /*
         * Validate the port forwarding config.
         */
        if (!CFGMR3AreValuesValid(pNode, "Protocol\0UDP\0HostPort\0GuestPort\0GuestIP\0BindIP\0"))
            return PDMDRV_SET_ERROR(pThis->pDrvIns, VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES, N_("Unknown configuration in port forwarding"));

        /* protocol type: either a "Protocol" string (TCP/UDP) or the legacy
           boolean "UDP" key (defaults to TCP when absent). */
        bool fUDP;
        char szProtocol[32];
        int rc;
        GET_STRING(rc, pThis, pNode, "Protocol", szProtocol[0], sizeof(szProtocol));
        if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        {
            fUDP = false;
            GET_BOOL(rc, pThis, pNode, "UDP", fUDP);
        }
        else if (RT_SUCCESS(rc))
        {
            if (!RTStrICmp(szProtocol, "TCP"))
                fUDP = false;
            else if (!RTStrICmp(szProtocol, "UDP"))
                fUDP = true;
            else
                return PDMDrvHlpVMSetError(pThis->pDrvIns, VERR_INVALID_PARAMETER, RT_SRC_POS,
                                           N_("NAT#%d: Invalid configuration value for \"Protocol\": \"%s\""),
                                           iInstance, szProtocol);
        }
        /* host port (mandatory) */
        int32_t iHostPort;
        GET_S32_STRICT(rc, pThis, pNode, "HostPort", iHostPort);

        /* guest port (mandatory) */
        int32_t iGuestPort;
        GET_S32_STRICT(rc, pThis, pNode, "GuestPort", iGuestPort);

        /* guest address; defaults to the NAT network's guest slot. */
        struct in_addr GuestIP;
        /* @todo (vvl) use CTL_* */
        GETIP_DEF(rc, pThis, pNode, GuestIP, htonl(Network | CTL_GUEST));

        /* Remember the first guest IP so drvNATSetMac can re-activate the
           port-forwarding rules once the MAC address is known. */
        if (pThis->GuestIP == INADDR_ANY)
            pThis->GuestIP = GuestIP.s_addr;

        /*
         * Call slirp about it.
         */
        struct in_addr BindIP;
        GETIP_DEF(rc, pThis, pNode, BindIP, INADDR_ANY);
        if (slirp_redir(pThis->pNATState, fUDP, BindIP, iHostPort, GuestIP, iGuestPort, Mac.au8) < 0)
            return PDMDrvHlpVMSetError(pThis->pDrvIns, VERR_NAT_REDIR_SETUP, RT_SRC_POS,
                                       N_("NAT#%d: configuration error: failed to set up "
                                          "redirection of %d to %d. Probably a conflict with "
                                          "existing services or other rules"), iInstance, iHostPort,
                                       iGuestPort);
    } /* for each redir rule */

    return VINF_SUCCESS;
}
1005
1006
/**
 * Destruct a driver instance.
 *
 * Most VM resources are freed by the VM. This callback is provided so that any non-VM
 * resources can be freed correctly.
 *
 * @param   pDrvIns     The driver instance data.
 */
static DECLCALLBACK(void) drvNATDestruct(PPDMDRVINS pDrvIns)
{
    PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
    LogFlow(("drvNATDestruct:\n"));
    PDMDRV_CHECK_VERSIONS_RETURN_VOID(pDrvIns);

    if (pThis->pNATState)
    {
        slirp_term(pThis->pNATState);
        slirp_deregister_statistics(pThis->pNATState, pDrvIns);
#ifdef VBOX_WITH_STATISTICS
/* Re-expand the counter list from counters.h to deregister each Stat*. */
# define DRV_PROFILE_COUNTER(name, dsc)     DEREGISTER_COUNTER(name, pThis)
# define DRV_COUNTING_COUNTER(name, dsc)    DEREGISTER_COUNTER(name, pThis)
# include "counters.h"
#endif
        pThis->pNATState = NULL;
    }
}
1033
1034
1035/**
1036 * Construct a NAT network transport driver instance.
1037 *
1038 * @copydoc FNPDMDRVCONSTRUCT
1039 */
1040static DECLCALLBACK(int) drvNATConstruct(PPDMDRVINS pDrvIns, PCFGMNODE pCfg, uint32_t fFlags)
1041{
1042 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1043 LogFlow(("drvNATConstruct:\n"));
1044 PDMDRV_CHECK_VERSIONS_RETURN(pDrvIns);
1045
1046 /*
1047 * Validate the config.
1048 */
1049 if (!CFGMR3AreValuesValid(pCfg,
1050 "PassDomain\0TFTPPrefix\0BootFile\0Network"
1051 "\0NextServer\0DNSProxy\0BindIP\0UseHostResolver\0"
1052 "SlirpMTU\0"
1053 "SockRcv\0SockSnd\0TcpRcv\0TcpSnd\0"))
1054 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES,
1055 N_("Unknown NAT configuration option, only supports PassDomain,"
1056 " TFTPPrefix, BootFile and Network"));
1057
1058 /*
1059 * Init the static parts.
1060 */
1061 pThis->pDrvIns = pDrvIns;
1062 pThis->pNATState = NULL;
1063 pThis->pszTFTPPrefix = NULL;
1064 pThis->pszBootFile = NULL;
1065 pThis->pszNextServer = NULL;
1066 /* IBase */
1067 pDrvIns->IBase.pfnQueryInterface = drvNATQueryInterface;
1068 /* INetwork */
1069 pThis->INetworkUp.pfnAllocBuf = drvNATNetworkUp_AllocBuf;
1070 pThis->INetworkUp.pfnFreeBuf = drvNATNetworkUp_FreeBuf;
1071 pThis->INetworkUp.pfnSendBuf = drvNATNetworkUp_SendBuf;
1072 pThis->INetworkUp.pfnSetPromiscuousMode = drvNATNetworkUp_SetPromiscuousMode;
1073 pThis->INetworkUp.pfnNotifyLinkChanged = drvNATNetworkUp_NotifyLinkChanged;
1074
1075 /*
1076 * Get the configuration settings.
1077 */
1078 int rc;
1079 bool fPassDomain = true;
1080 GET_BOOL(rc, pThis, pCfg, "PassDomain", fPassDomain);
1081
1082 GET_STRING_ALLOC(rc, pThis, pCfg, "TFTPPrefix", pThis->pszTFTPPrefix);
1083 GET_STRING_ALLOC(rc, pThis, pCfg, "BootFile", pThis->pszBootFile);
1084 GET_STRING_ALLOC(rc, pThis, pCfg, "NextServer", pThis->pszNextServer);
1085
1086 int fDNSProxy = 0;
1087 GET_S32(rc, pThis, pCfg, "DNSProxy", fDNSProxy);
1088 int fUseHostResolver = 0;
1089 GET_S32(rc, pThis, pCfg, "UseHostResolver", fUseHostResolver);
1090#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1091 int MTU = 1500;
1092 GET_S32(rc, pThis, pCfg, "SlirpMTU", MTU);
1093#endif
1094
1095 /*
1096 * Query the network port interface.
1097 */
1098 pThis->pIAboveNet = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMINETWORKDOWN);
1099 if (!pThis->pIAboveNet)
1100 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_ABOVE,
1101 N_("Configuration error: the above device/driver didn't "
1102 "export the network port interface"));
1103 pThis->pIAboveConfig = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMINETWORKCONFIG);
1104 if (!pThis->pIAboveConfig)
1105 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_ABOVE,
1106 N_("Configuration error: the above device/driver didn't "
1107 "export the network config interface"));
1108
1109 /* Generate a network address for this network card. */
1110 char szNetwork[32]; /* xxx.xxx.xxx.xxx/yy */
1111 GET_STRING(rc, pThis, pCfg, "Network", szNetwork[0], sizeof(szNetwork));
1112 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1113 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS, N_("NAT%d: Configuration error: "
1114 "missing network"),
1115 pDrvIns->iInstance, szNetwork);
1116
1117 RTIPV4ADDR Network;
1118 RTIPV4ADDR Netmask;
1119 rc = RTCidrStrToIPv4(szNetwork, &Network, &Netmask);
1120 if (RT_FAILURE(rc))
1121 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS, N_("NAT#%d: Configuration error: "
1122 "network '%s' describes not a valid IPv4 network"),
1123 pDrvIns->iInstance, szNetwork);
1124
1125 char szNetAddr[16];
1126 RTStrPrintf(szNetAddr, sizeof(szNetAddr), "%d.%d.%d.%d",
1127 (Network & 0xFF000000) >> 24, (Network & 0xFF0000) >> 16,
1128 (Network & 0xFF00) >> 8, Network & 0xFF);
1129
1130 /*
1131 * Initialize slirp.
1132 */
1133 rc = slirp_init(&pThis->pNATState, &szNetAddr[0], Netmask, fPassDomain, !!fUseHostResolver, pThis);
1134 if (RT_SUCCESS(rc))
1135 {
1136 slirp_set_dhcp_TFTP_prefix(pThis->pNATState, pThis->pszTFTPPrefix);
1137 slirp_set_dhcp_TFTP_bootfile(pThis->pNATState, pThis->pszBootFile);
1138 slirp_set_dhcp_next_server(pThis->pNATState, pThis->pszNextServer);
1139 slirp_set_dhcp_dns_proxy(pThis->pNATState, !!fDNSProxy);
1140#ifdef VBOX_WITH_SLIRP_BSD_MBUF
1141 slirp_set_mtu(pThis->pNATState, MTU);
1142#endif
1143 char *pszBindIP = NULL;
1144 GET_STRING_ALLOC(rc, pThis, pCfg, "BindIP", pszBindIP);
1145 rc = slirp_set_binding_address(pThis->pNATState, pszBindIP);
1146 if (rc != 0)
1147 LogRel(("NAT: value of BindIP has been ignored\n"));
1148
1149 if(pszBindIP != NULL)
1150 MMR3HeapFree(pszBindIP);
1151#define SLIRP_SET_TUNING_VALUE(name, setter) \
1152 do \
1153 { \
1154 int len = 0; \
1155 rc = CFGMR3QueryS32(pCfg, name, &len); \
1156 if (RT_SUCCESS(rc)) \
1157 setter(pThis->pNATState, len); \
1158 } while(0)
1159
1160 SLIRP_SET_TUNING_VALUE("SockRcv", slirp_set_rcvbuf);
1161 SLIRP_SET_TUNING_VALUE("SockSnd", slirp_set_sndbuf);
1162 SLIRP_SET_TUNING_VALUE("TcpRcv", slirp_set_tcp_rcvspace);
1163 SLIRP_SET_TUNING_VALUE("TcpSnd", slirp_set_tcp_sndspace);
1164
1165 slirp_register_statistics(pThis->pNATState, pDrvIns);
1166#ifdef VBOX_WITH_STATISTICS
1167# define DRV_PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pThis, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
1168# define DRV_COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pThis, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
1169# include "counters.h"
1170#endif
1171
1172 int rc2 = drvNATConstructRedir(pDrvIns->iInstance, pThis, pCfg, Network);
1173 if (RT_SUCCESS(rc2))
1174 {
1175 /*
1176 * Register a load done notification to get the MAC address into the slirp
1177 * engine after we loaded a guest state.
1178 */
1179 rc2 = PDMDrvHlpSSMRegisterLoadDone(pDrvIns, drvNATLoadDone);
1180 AssertRC(rc2);
1181 rc = RTReqCreateQueue(&pThis->pSlirpReqQueue);
1182 if (RT_FAILURE(rc))
1183 {
1184 LogRel(("NAT: Can't create request queue\n"));
1185 return rc;
1186 }
1187
1188
1189 rc = RTReqCreateQueue(&pThis->pRecvReqQueue);
1190 if (RT_FAILURE(rc))
1191 {
1192 LogRel(("NAT: Can't create request queue\n"));
1193 return rc;
1194 }
1195 rc = RTReqCreateQueue(&pThis->pUrgRecvReqQueue);
1196 if (RT_FAILURE(rc))
1197 {
1198 LogRel(("NAT: Can't create request queue\n"));
1199 return rc;
1200 }
1201 rc = PDMDrvHlpPDMThreadCreate(pDrvIns, &pThis->pRecvThread, pThis, drvNATRecv,
1202 drvNATRecvWakeup, 128 * _1K, RTTHREADTYPE_IO, "NATRX");
1203 AssertRC(rc);
1204 rc = RTSemEventCreate(&pThis->EventRecv);
1205
1206 rc = PDMDrvHlpPDMThreadCreate(pDrvIns, &pThis->pUrgRecvThread, pThis, drvNATUrgRecv,
1207 drvNATUrgRecvWakeup, 128 * _1K, RTTHREADTYPE_IO, "NATURGRX");
1208 AssertRC(rc);
1209 rc = RTSemEventCreate(&pThis->EventRecv);
1210 rc = RTSemEventCreate(&pThis->EventUrgRecv);
1211 rc = RTCritSectInit(&pThis->csDevAccess);
1212
1213#ifndef RT_OS_WINDOWS
1214 /*
1215 * Create the control pipe.
1216 */
1217 int fds[2];
1218 if (pipe(&fds[0]) != 0) /** @todo RTPipeCreate() or something... */
1219 {
1220 rc = RTErrConvertFromErrno(errno);
1221 AssertRC(rc);
1222 return rc;
1223 }
1224 pThis->PipeRead = fds[0];
1225 pThis->PipeWrite = fds[1];
1226#else
1227 pThis->hWakeupEvent = CreateEvent(NULL, FALSE, FALSE, NULL); /* auto-reset event */
1228 slirp_register_external_event(pThis->pNATState, pThis->hWakeupEvent,
1229 VBOX_WAKEUP_EVENT_INDEX);
1230#endif
1231
1232 rc = PDMDrvHlpPDMThreadCreate(pDrvIns, &pThis->pSlirpThread, pThis, drvNATAsyncIoThread,
1233 drvNATAsyncIoWakeup, 128 * _1K, RTTHREADTYPE_IO, "NAT");
1234 AssertRC(rc);
1235
1236#ifdef VBOX_WITH_SLIRP_MT
1237 rc = PDMDrvHlpPDMThreadCreate(pDrvIns, &pThis->pGuestThread, pThis, drvNATAsyncIoGuest,
1238 drvNATAsyncIoGuestWakeup, 128 * _1K, RTTHREADTYPE_IO, "NATGUEST");
1239 AssertRC(rc);
1240#endif
1241
1242 pThis->enmLinkState = PDMNETWORKLINKSTATE_UP;
1243
1244 /* might return VINF_NAT_DNS */
1245 return rc;
1246 }
1247 /* failure path */
1248 rc = rc2;
1249 slirp_term(pThis->pNATState);
1250 pThis->pNATState = NULL;
1251 }
1252 else
1253 {
1254 PDMDRV_SET_ERROR(pDrvIns, rc, N_("Unknown error during NAT networking setup: "));
1255 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
1256 }
1257
1258 return rc;
1259}
1260
1261
1262/**
1263 * NAT network transport driver registration record.
1264 */
1265const PDMDRVREG g_DrvNAT =
1266{
1267 /* u32Version */
1268 PDM_DRVREG_VERSION,
1269 /* szName */
1270 "NAT",
1271 /* szRCMod */
1272 "",
1273 /* szR0Mod */
1274 "",
1275 /* pszDescription */
1276 "NAT Network Transport Driver",
1277 /* fFlags */
1278 PDM_DRVREG_FLAGS_HOST_BITS_DEFAULT,
1279 /* fClass. */
1280 PDM_DRVREG_CLASS_NETWORK,
1281 /* cMaxInstances */
1282 16,
1283 /* cbInstance */
1284 sizeof(DRVNAT),
1285 /* pfnConstruct */
1286 drvNATConstruct,
1287 /* pfnDestruct */
1288 drvNATDestruct,
1289 /* pfnRelocate */
1290 NULL,
1291 /* pfnIOCtl */
1292 NULL,
1293 /* pfnPowerOn */
1294 drvNATPowerOn,
1295 /* pfnReset */
1296 NULL,
1297 /* pfnSuspend */
1298 NULL,
1299 /* pfnResume */
1300 NULL,
1301 /* pfnAttach */
1302 NULL,
1303 /* pfnDetach */
1304 NULL,
1305 /* pfnPowerOff */
1306 NULL,
1307 /* pfnSoftReset */
1308 NULL,
1309 /* u32EndVersion */
1310 PDM_DRVREG_VERSION
1311};
1312
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette