VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DrvNATlibslirp.cpp@105064

Last change on this file since 105064 was 105064, checked in by vboxsync, 10 months ago

Devices/Network: clean up code and comments. bugref:10268

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 50.2 KB
1/* $Id: DrvNATlibslirp.cpp 105064 2024-06-27 14:46:31Z vboxsync $ */
2/** @file
3 * DrvNATlibslirp - NATlibslirp network transport driver.
4 */
5
6/*
7 * Copyright (C) 2022-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DRV_NAT
33#define __STDC_LIMIT_MACROS
34#define __STDC_CONSTANT_MACROS
35
36#include "DrvNATlibslirp.h"
37
38
39/*********************************************************************************************************************************
40* Internal Functions *
41*********************************************************************************************************************************/
42
43/**
44 * @callback_method_impl{FNPDMTHREADDRV}
45 *
46 * Receive thread: processes packets queued for delivery to the guest. Woken up by drvNATRecvWakeup.
47 */
48static DECLCALLBACK(int) drvNATRecv(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
49{
50 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
51
52 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
53 return VINF_SUCCESS;
54
55 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
56 {
57 RTReqQueueProcess(pThis->hRecvReqQueue, 0);
58 if (ASMAtomicReadU32(&pThis->cPkts) == 0)
59 RTSemEventWait(pThis->EventRecv, RT_INDEFINITE_WAIT);
60 }
61 return VINF_SUCCESS;
62}
63
64/**
65 * @callback_method_impl{FNPDMTHREADWAKEUPDRV}
66 */
67static DECLCALLBACK(int) drvNATRecvWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
68{
69 RT_NOREF(pThread);
70 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
71 int rc = RTSemEventSignal(pThis->EventRecv);
72 AssertRC(rc);
73
74 STAM_COUNTER_INC(&pThis->StatNATRecvWakeups);
75 return VINF_SUCCESS;
76}
77
78/**
79 * @callback_method_impl{FNPDMTHREADDRV}
80 */
81static DECLCALLBACK(int) drvNATUrgRecv(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
82{
83 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
84
85 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
86 return VINF_SUCCESS;
87
88 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
89 {
90 RTReqQueueProcess(pThis->hUrgRecvReqQueue, 0);
91 if (ASMAtomicReadU32(&pThis->cUrgPkts) == 0)
92 {
93 int rc = RTSemEventWait(pThis->EventUrgRecv, RT_INDEFINITE_WAIT);
94 AssertRC(rc);
95 }
96 }
97 return VINF_SUCCESS;
98}
99
100/**
101 * @callback_method_impl{FNPDMTHREADWAKEUPDRV}
102 */
103static DECLCALLBACK(int) drvNATUrgRecvWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
104{
105 RT_NOREF(pThread);
106 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
107 int rc = RTSemEventSignal(pThis->EventUrgRecv);
108 AssertRC(rc);
109
110 return VINF_SUCCESS;
111}
112
113/**
114 * @brief Processes incoming packet (to guest).
115 *
116 * @param pThis Pointer to DRVNAT state for current context.
117 * @param pBuf Pointer to packet buffer.
118 * @param cb Size of packet in buffer.
119 *
120 * @thread NAT
121 */
122static DECLCALLBACK(void) drvNATRecvWorker(PDRVNAT pThis, void *pBuf, int cb)
123{
124 int rc;
125 STAM_PROFILE_START(&pThis->StatNATRecv, a);
126
127 while (ASMAtomicReadU32(&pThis->cUrgPkts) != 0)
128 {
129 rc = RTSemEventWait(pThis->EventRecv, RT_INDEFINITE_WAIT);
130 if ( RT_FAILURE(rc)
131 && ( rc == VERR_TIMEOUT
132 || rc == VERR_INTERRUPTED))
133 goto done_unlocked;
134 }
135
136 rc = RTCritSectEnter(&pThis->DevAccessLock);
137 AssertRC(rc);
138
139 STAM_PROFILE_START(&pThis->StatNATRecvWait, b);
140 rc = pThis->pIAboveNet->pfnWaitReceiveAvail(pThis->pIAboveNet, RT_INDEFINITE_WAIT);
141 STAM_PROFILE_STOP(&pThis->StatNATRecvWait, b);
142
143 if (RT_SUCCESS(rc))
144 {
145 rc = pThis->pIAboveNet->pfnReceive(pThis->pIAboveNet, pBuf, cb);
146 AssertRC(rc);
147 RTMemFree(pBuf);
148 pBuf = NULL;
149 }
150 else if ( rc != VERR_TIMEOUT
151 && rc != VERR_INTERRUPTED)
152 {
153 AssertRC(rc);
154 }
155
156 rc = RTCritSectLeave(&pThis->DevAccessLock);
157 AssertRC(rc);
158
159done_unlocked:
160 ASMAtomicDecU32(&pThis->cPkts);
161
162 drvNATNotifyNATThread(pThis, "drvNATRecvWorker");
163
164 RTMemFree(pBuf);
165 pBuf = NULL;
166
167 STAM_PROFILE_STOP(&pThis->StatNATRecv, a);
168}
169
170/**
171 * Frees an S/G buffer allocated by drvNATNetworkUp_AllocBuf.
172 *
173 * @param pThis Pointer to the NAT instance.
174 * @param pSgBuf The S/G buffer to free.
175 *
176 * @thread NAT
177 */
178static void drvNATFreeSgBuf(PDRVNAT pThis, PPDMSCATTERGATHER pSgBuf)
179{
180 RT_NOREF(pThis);
181 Assert((pSgBuf->fFlags & PDMSCATTERGATHER_FLAGS_MAGIC_MASK) == PDMSCATTERGATHER_FLAGS_MAGIC);
182 pSgBuf->fFlags = 0;
183 if (pSgBuf->pvAllocator)
184 {
185 Assert(!pSgBuf->pvUser);
186 RTMemFree(pSgBuf->aSegs[0].pvSeg);
187 }
188 else if (pSgBuf->pvUser)
189 {
190 RTMemFree(pSgBuf->aSegs[0].pvSeg);
191 pSgBuf->aSegs[0].pvSeg = NULL;
192 RTMemFree(pSgBuf->pvUser);
193 pSgBuf->pvUser = NULL;
194 }
195 RTMemFree(pSgBuf);
196}
197
198/**
199 * Worker function for drvNATSend().
200 *
201 * @param pThis Pointer to the NAT instance.
202 * @param pSgBuf The scatter/gather buffer.
203 * @thread NAT
204 */
205static DECLCALLBACK(void) drvNATSendWorker(PDRVNAT pThis, PPDMSCATTERGATHER pSgBuf)
206{
207 LogFlowFunc(("pThis=%p pSgBuf=%p\n", pThis, pSgBuf));
208
209 if (pThis->enmLinkState == PDMNETWORKLINKSTATE_UP)
210 {
211 const uint8_t *m = static_cast<const uint8_t*>(pSgBuf->pvAllocator);
212 if (m)
213 {
214 /*
215 * A normal frame.
216 */
217 LogFlowFunc(("m=%p\n", m));
218 slirp_input(pThis->pNATState->pSlirp, (uint8_t const *)pSgBuf->pvAllocator, (int)pSgBuf->cbUsed);
219 }
220 else
221 {
222 /*
223 * M_EXT buf, need to segment it.
224 */
225
226 uint8_t const *pbFrame = (uint8_t const *)pSgBuf->aSegs[0].pvSeg;
227 PCPDMNETWORKGSO pGso = (PCPDMNETWORKGSO)pSgBuf->pvUser;
228 /* Do not attempt to segment frames with invalid GSO parameters. */
229 if (PDMNetGsoIsValid((const PDMNETWORKGSO *)pGso, sizeof(*pGso), pSgBuf->cbUsed))
230 {
231 uint32_t const cSegs = PDMNetGsoCalcSegmentCount(pGso, pSgBuf->cbUsed);
232 Assert(cSegs > 1);
233 for (uint32_t iSeg = 0; iSeg < cSegs; iSeg++)
234 {
235 void *pvSeg;
236
237 /** @todo r=jack: is this fine leaving as a constant instead of dynamic? */
238 pvSeg = RTMemAlloc(DRVNAT_MAXFRAMESIZE);
239
240 uint32_t cbPayload, cbHdrs;
241 uint32_t offPayload = PDMNetGsoCarveSegment(pGso, pbFrame, pSgBuf->cbUsed,
242 iSeg, cSegs, (uint8_t *)pvSeg, &cbHdrs, &cbPayload);
243 memcpy((uint8_t *)pvSeg + cbHdrs, pbFrame + offPayload, cbPayload);
244
245 slirp_input(pThis->pNATState->pSlirp, (uint8_t const *)pvSeg, cbPayload + cbHdrs);
246 RTMemFree(pvSeg);
247 }
248 }
249 }
250 }
251
252 LogFlowFunc(("leave\n"));
253 drvNATFreeSgBuf(pThis, pSgBuf);
254}
255
256/**
257 * @interface_method_impl{PDMINETWORKUP,pfnBeginXmit}
258 */
259static DECLCALLBACK(int) drvNATNetworkUp_BeginXmit(PPDMINETWORKUP pInterface, bool fOnWorkerThread)
260{
261 RT_NOREF(fOnWorkerThread);
262 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
263 int rc = RTCritSectTryEnter(&pThis->XmitLock);
264 if (RT_FAILURE(rc))
265 {
266 /** @todo Kick the worker thread when we have one... */
267 rc = VERR_TRY_AGAIN;
268 }
269 LogFlowFunc(("Beginning xmit...\n"));
270 return rc;
271}
272
273/**
274 * @interface_method_impl{PDMINETWORKUP,pfnAllocBuf}
275 */
276static DECLCALLBACK(int) drvNATNetworkUp_AllocBuf(PPDMINETWORKUP pInterface, size_t cbMin,
277 PCPDMNETWORKGSO pGso, PPPDMSCATTERGATHER ppSgBuf)
278{
279 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
280 Assert(RTCritSectIsOwner(&pThis->XmitLock));
281
282 LogFlowFunc(("enter\n"));
283
284 /*
285 * Drop the incoming frame if the NAT thread isn't running.
286 */
287 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
288 {
289 Log(("drvNATNetworkUp_AllocBuf: returns VERR_NET_NO_NETWORK\n"));
290 return VERR_NET_NO_NETWORK;
291 }
292
293 /*
294 * Allocate a scatter/gather buffer and an mbuf.
295 */
296 PPDMSCATTERGATHER pSgBuf = (PPDMSCATTERGATHER)RTMemAllocZ(sizeof(PDMSCATTERGATHER));
297 if (!pSgBuf)
298 return VERR_NO_MEMORY;
299 if (!pGso)
300 {
301 /*
302 * Drop the frame if it is too big.
303 */
304 if (cbMin >= DRVNAT_MAXFRAMESIZE)
305 {
306 Log(("drvNATNetworkUp_AllocBuf: drops over-sized frame (%u bytes), returns VERR_INVALID_PARAMETER\n",
307 cbMin));
308 RTMemFree(pSgBuf);
309 return VERR_INVALID_PARAMETER;
310 }
311
312 pSgBuf->pvUser = NULL;
313 pSgBuf->aSegs[0].cbSeg = RT_ALIGN_Z(cbMin, 128);
314 pSgBuf->aSegs[0].pvSeg = RTMemAlloc(pSgBuf->aSegs[0].cbSeg);
315 pSgBuf->pvAllocator = pSgBuf->aSegs[0].pvSeg;
316
317 if (!pSgBuf->pvAllocator)
318 {
319 RTMemFree(pSgBuf);
320 return VERR_TRY_AGAIN;
321 }
322 }
323 else
324 {
325 /*
326 * Drop the frame if its segment is too big.
327 */
328 if (pGso->cbHdrsTotal + pGso->cbMaxSeg >= DRVNAT_MAXFRAMESIZE)
329 {
330 Log(("drvNATNetworkUp_AllocBuf: drops over-sized frame (%u bytes), returns VERR_INVALID_PARAMETER\n",
331 pGso->cbHdrsTotal + pGso->cbMaxSeg));
332 RTMemFree(pSgBuf);
333 return VERR_INVALID_PARAMETER;
334 }
335
336 pSgBuf->pvUser = RTMemDup(pGso, sizeof(*pGso));
337 pSgBuf->pvAllocator = NULL;
338
339 /** @todo r=jack: figure out why need *2 */
340 pSgBuf->aSegs[0].cbSeg = RT_ALIGN_Z(cbMin*2, 128);
341 pSgBuf->aSegs[0].pvSeg = RTMemAlloc(pSgBuf->aSegs[0].cbSeg);
342 if (!pSgBuf->pvUser || !pSgBuf->aSegs[0].pvSeg)
343 {
344 RTMemFree(pSgBuf->aSegs[0].pvSeg);
345 RTMemFree(pSgBuf->pvUser);
346 RTMemFree(pSgBuf);
347 return VERR_TRY_AGAIN;
348 }
349 }
350
351 /*
352 * Initialize the S/G buffer and return.
353 */
354 pSgBuf->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_1;
355 pSgBuf->cbUsed = 0;
356 pSgBuf->cbAvailable = pSgBuf->aSegs[0].cbSeg;
357 pSgBuf->cSegs = 1;
358
359 *ppSgBuf = pSgBuf;
360 return VINF_SUCCESS;
361}
362
363/**
364 * @interface_method_impl{PDMINETWORKUP,pfnFreeBuf}
365 */
366static DECLCALLBACK(int) drvNATNetworkUp_FreeBuf(PPDMINETWORKUP pInterface, PPDMSCATTERGATHER pSgBuf)
367{
368 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
369 Assert(RTCritSectIsOwner(&pThis->XmitLock));
370 drvNATFreeSgBuf(pThis, pSgBuf);
371 return VINF_SUCCESS;
372}
373
374/**
375 * @interface_method_impl{PDMINETWORKUP,pfnSendBuf}
376 */
377static DECLCALLBACK(int) drvNATNetworkUp_SendBuf(PPDMINETWORKUP pInterface, PPDMSCATTERGATHER pSgBuf, bool fOnWorkerThread)
378{
379 RT_NOREF(fOnWorkerThread);
380 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
381 Assert((pSgBuf->fFlags & PDMSCATTERGATHER_FLAGS_OWNER_MASK) == PDMSCATTERGATHER_FLAGS_OWNER_1);
382 Assert(RTCritSectIsOwner(&pThis->XmitLock));
383
384 LogFlowFunc(("enter\n"));
385
386 int rc;
387 if (pThis->pSlirpThread->enmState == PDMTHREADSTATE_RUNNING)
388 {
389 rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, NULL /*ppReq*/, 0 /*cMillies*/,
390 RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
391 (PFNRT)drvNATSendWorker, 2, pThis, pSgBuf);
392 if (RT_SUCCESS(rc))
393 {
394 drvNATNotifyNATThread(pThis, "drvNATNetworkUp_SendBuf");
395 LogFlowFunc(("leave success\n"));
396 return VINF_SUCCESS;
397 }
398
399 rc = VERR_NET_NO_BUFFER_SPACE;
400 }
401 else
402 rc = VERR_NET_DOWN;
403 drvNATFreeSgBuf(pThis, pSgBuf);
404 LogFlowFunc(("leave rc=%Rrc\n", rc));
405 return rc;
406}
407
408/**
409 * @interface_method_impl{PDMINETWORKUP,pfnEndXmit}
410 */
411static DECLCALLBACK(void) drvNATNetworkUp_EndXmit(PPDMINETWORKUP pInterface)
412{
413 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
414 RTCritSectLeave(&pThis->XmitLock);
415}
416
417/**
418 * Get the NAT thread out of poll/WSAWaitForMultipleEvents
419 */
420static void drvNATNotifyNATThread(PDRVNAT pThis, const char *pszWho)
421{
422 RT_NOREF(pszWho);
423 int rc;
424#ifndef RT_OS_WINDOWS
425 /* kick poll() */
426 size_t cbIgnored;
427 rc = RTPipeWrite(pThis->hPipeWrite, "", 1, &cbIgnored);
428#else
429 /* kick WSAWaitForMultipleEvents */
430 rc = WSASetEvent(pThis->hWakeupEvent);
431#endif
432 AssertRC(rc);
433}
434
435/**
436 * @interface_method_impl{PDMINETWORKUP,pfnSetPromiscuousMode}
437 */
438static DECLCALLBACK(void) drvNATNetworkUp_SetPromiscuousMode(PPDMINETWORKUP pInterface, bool fPromiscuous)
439{
440 RT_NOREF(pInterface, fPromiscuous);
441 LogFlow(("drvNATNetworkUp_SetPromiscuousMode: fPromiscuous=%d\n", fPromiscuous));
442 /* nothing to do */
443}
444
445/**
446 * Worker function for drvNATNetworkUp_NotifyLinkChanged().
447 * @thread "NAT" thread.
448 *
449 * @param pThis Pointer to DRVNAT state for current context.
450 * @param enmLinkState Enum value of link state.
451 *
452 * @thread NAT
453 */
454static DECLCALLBACK(void) drvNATNotifyLinkChangedWorker(PDRVNAT pThis, PDMNETWORKLINKSTATE enmLinkState)
455{
456 pThis->enmLinkState = pThis->enmLinkStateWant = enmLinkState;
457 switch (enmLinkState)
458 {
459 case PDMNETWORKLINKSTATE_UP:
460 LogRel(("NAT: Link up\n"));
461 break;
462
463 case PDMNETWORKLINKSTATE_DOWN:
464 case PDMNETWORKLINKSTATE_DOWN_RESUME:
465 LogRel(("NAT: Link down\n"));
466 break;
467
468 default:
469 AssertMsgFailed(("drvNATNetworkUp_NotifyLinkChanged: unexpected link state %d\n", enmLinkState));
470 }
471}
472
473/**
474 * Notification on link status changes.
475 *
476 * @param pInterface Pointer to the interface structure containing the called function pointer.
477 * @param enmLinkState The new link state.
478 *
479 * @thread EMT
480 */
481static DECLCALLBACK(void) drvNATNetworkUp_NotifyLinkChanged(PPDMINETWORKUP pInterface, PDMNETWORKLINKSTATE enmLinkState)
482{
483 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkUp);
484
485 LogFlow(("drvNATNetworkUp_NotifyLinkChanged: enmLinkState=%d\n", enmLinkState));
486
487 /* Don't queue new requests if the NAT thread is not running (e.g. paused,
488 * stopping), otherwise we would deadlock. Memorize the change. */
489 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
490 {
491 pThis->enmLinkStateWant = enmLinkState;
492 return;
493 }
494
495 PRTREQ pReq;
496 int rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, &pReq, 0 /*cMillies*/, RTREQFLAGS_VOID,
497 (PFNRT)drvNATNotifyLinkChangedWorker, 2, pThis, enmLinkState);
498 if (rc == VERR_TIMEOUT)
499 {
500 drvNATNotifyNATThread(pThis, "drvNATNetworkUp_NotifyLinkChanged");
501 rc = RTReqWait(pReq, RT_INDEFINITE_WAIT);
502 AssertRC(rc);
503 }
504 else
505 AssertRC(rc);
506 RTReqRelease(pReq);
507}
508
509/**
510 * Registers a file descriptor for polling. Unused apart from logging.
511 */
512static void drvNAT_RegisterPoll(int fd, void *opaque) {
513 RT_NOREF(fd, opaque);
514 Log4(("Poll registered\n"));
515}
516
517/**
518 * Unregisters a polled file descriptor. Unused apart from logging.
519 */
520static void drvNAT_UnregisterPoll(int fd, void *opaque) {
521 RT_NOREF(fd, opaque);
522 Log4(("Poll unregistered\n"));
523}
524
525/**
526 * Converts slirp representation of poll events to host representation.
527 *
528 * @param iEvents Integer representing slirp type poll events.
529 *
530 * @returns Integer representing host type poll events.
531 *
532 * @thread ?
533 */
534static int drvNAT_PollEventSlirpToHost(int iEvents) {
535 int iRet = 0;
536#ifndef RT_OS_WINDOWS
537 if (iEvents & SLIRP_POLL_IN) iRet |= POLLIN;
538 if (iEvents & SLIRP_POLL_OUT) iRet |= POLLOUT;
539 if (iEvents & SLIRP_POLL_PRI) iRet |= POLLPRI;
540 if (iEvents & SLIRP_POLL_ERR) iRet |= POLLERR;
541 if (iEvents & SLIRP_POLL_HUP) iRet |= POLLHUP;
542#else
543 if (iEvents & SLIRP_POLL_IN) iRet |= (POLLRDNORM | POLLRDBAND);
544 if (iEvents & SLIRP_POLL_OUT) iRet |= POLLWRNORM;
545 if (iEvents & SLIRP_POLL_PRI) iRet |= (POLLIN);
546 if (iEvents & SLIRP_POLL_ERR) iRet |= 0;
547 if (iEvents & SLIRP_POLL_HUP) iRet |= 0;
548#endif
549 return iRet;
550}
551
552/**
553 * Converts host representation of poll events to slirp representation.
554 *
555 * @param iEvents Integer representing host type poll events.
556 *
557 * @returns Integer representing slirp type poll events.
558 *
559 * @thread ?
560 */
561static int drvNAT_PollEventHostToSlirp(int iEvents) {
562 int iRet = 0;
563#ifndef RT_OS_WINDOWS
564 if (iEvents & POLLIN) iRet |= SLIRP_POLL_IN;
565 if (iEvents & POLLOUT) iRet |= SLIRP_POLL_OUT;
566 if (iEvents & POLLPRI) iRet |= SLIRP_POLL_PRI;
567 if (iEvents & POLLERR) iRet |= SLIRP_POLL_ERR;
568 if (iEvents & POLLHUP) iRet |= SLIRP_POLL_HUP;
569#else
570 if (iEvents & (POLLRDNORM | POLLRDBAND)) iRet |= SLIRP_POLL_IN;
571 if (iEvents & POLLWRNORM) iRet |= SLIRP_POLL_OUT;
572 if (iEvents & (POLLPRI)) iRet |= SLIRP_POLL_PRI;
573 if (iEvents & POLLERR) iRet |= SLIRP_POLL_ERR;
574 if (iEvents & POLLHUP) iRet |= SLIRP_POLL_HUP;
575#endif
576 return iRet;
577}
578
579/**
580 * Callback function to add entry to pollfd array.
581 *
582 * @param iFd Integer of system file descriptor of socket.
583 * (on windows, this is a VBox internal, not system, value).
584 * @param iEvents Integer of slirp type poll events.
585 * @param opaque Pointer to NAT State context.
586 *
587 * @returns Index of latest pollfd entry.
588 *
589 * @thread ?
590 */
591static int drvNAT_addPollCb(int iFd, int iEvents, void *opaque)
592{
593 PDRVNAT pThis = (PDRVNAT)opaque;
594
595 if (pThis->pNATState->nsock + 1 >= pThis->pNATState->uPollCap)
596 {
597 int cbNew = pThis->pNATState->uPollCap * 2 * sizeof(struct pollfd);
598 struct pollfd *pvNew = (struct pollfd *)RTMemRealloc(pThis->pNATState->polls, cbNew);
599 if (pvNew)
600 {
601 pThis->pNATState->polls = pvNew;
602 pThis->pNATState->uPollCap *= 2;
603 }
604 else
605 return -1;
606 }
607
608 int idx = pThis->pNATState->nsock;
609#ifdef RT_OS_WINDOWS
610 pThis->pNATState->polls[idx].fd = libslirp_wrap_RTHandleTableLookup(iFd);
611#else
612 pThis->pNATState->polls[idx].fd = iFd;
613#endif
614 pThis->pNATState->polls[idx].events = drvNAT_PollEventSlirpToHost(iEvents);
615 pThis->pNATState->polls[idx].revents = 0;
616 pThis->pNATState->nsock += 1;
617 return idx;
618}
619
620/**
621 * Get translated revents from a poll at a given index.
622 *
623 * @param idx Integer index of poll.
624 * @param opaque Pointer to NAT State context.
625 *
626 * @returns Integer representing translated revents.
627 *
628 * @thread ?
629 */
630static int get_revents_cb(int idx, void *opaque)
631{
632 PDRVNAT pThis = (PDRVNAT)opaque;
633 struct pollfd* polls = pThis->pNATState->polls;
634 return drvNAT_PollEventHostToSlirp(polls[idx].revents);
635}
636
637/**
638 * NAT thread handling the slirp stuff.
639 *
640 * The slirp implementation is single-threaded so we execute this engine in a
641 * dedicated thread. We take care that this thread does not become the
642 * bottleneck: If the guest wants to send, a request is enqueued into the
643 * hSlirpReqQueue and handled asynchronously by this thread. If this thread
644 * wants to deliver packets to the guest, it enqueues a request into
645 * hRecvReqQueue which is later handled by the Recv thread.
646 *
647 * @param pDrvIns Pointer to PDM driver context.
648 * @param pThread Pointer to calling thread context.
649 *
650 * @returns VBox status code
651 *
652 * @thread NAT
653 */
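/*
 * Each iteration of the polling loop below: builds the pollfd array via
 * slirp_pollfds_fill, clamps the timeout to the next slirp timer
 * (slirpUpdateTimeout), polls, feeds the results back to libslirp via
 * slirp_pollfds_poll, processes hSlirpReqQueue and fires expired timers
 * (slirpCheckTimeout).
 */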
654static DECLCALLBACK(int) drvNATAsyncIoThread(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
655{
656 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
657#ifdef RT_OS_WINDOWS
658 unsigned int cBreak = 0;
659#else /* RT_OS_WINDOWS */
660 unsigned int cPollNegRet = 0;
661 drvNAT_addPollCb(RTPipeToNative(pThis->hPipeRead), SLIRP_POLL_IN | SLIRP_POLL_HUP, pThis);
662 pThis->pNATState->polls[0].fd = RTPipeToNative(pThis->hPipeRead);
663 pThis->pNATState->polls[0].events = POLLRDNORM | POLLPRI | POLLRDBAND;
664 pThis->pNATState->polls[0].revents = 0;
665#endif /* !RT_OS_WINDOWS */
666
667 LogFlow(("drvNATAsyncIoThread: pThis=%p\n", pThis));
668
669 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
670 return VINF_SUCCESS;
671
672 if (pThis->enmLinkStateWant != pThis->enmLinkState)
673 drvNATNotifyLinkChangedWorker(pThis, pThis->enmLinkStateWant);
674
675 /*
676 * Polling loop.
677 */
678 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
679 {
680 /*
681 * To prevent concurrent execution of sending/receiving threads
682 */
683#ifndef RT_OS_WINDOWS
684 uint32_t uTimeout = 0;
685 pThis->pNATState->nsock = 1;
686
687 slirp_pollfds_fill(pThis->pNATState->pSlirp, &uTimeout, drvNAT_addPollCb /* SlirpAddPollCb */, pThis /* opaque */);
688 slirpUpdateTimeout(&uTimeout, pThis);
689
690 int cChangedFDs = poll(pThis->pNATState->polls, pThis->pNATState->nsock, uTimeout /* timeout */);
691
692 if (cChangedFDs < 0)
693 {
694 if (errno == EINTR)
695 {
696 Log2(("NAT: signal was caught while sleeping in poll\n"));
697 /* No error, just process all outstanding requests but don't wait */
698 cChangedFDs = 0;
699 }
700 else if (cPollNegRet++ > 128)
701 {
702 LogRel(("NAT: Poll returns (%s) suppressed %d\n", strerror(errno), cPollNegRet));
703 cPollNegRet = 0;
704 }
705 }
706
707
708 slirp_pollfds_poll(pThis->pNATState->pSlirp, cChangedFDs < 0, get_revents_cb /* SlirpGetREventsCb */, pThis /* opaque */);
709 if (pThis->pNATState->polls[0].revents & (POLLRDNORM|POLLPRI|POLLRDBAND))
710 {
711 /* Drain the pipe.
712 *
713 * Note! drvNATSend is decoupled, so we don't know how many times the
714 * device's thread has written to the pipe before we entered the
715 * poll; to avoid a false alarm the pipe is drained here.
716 *
717 * @todo Probably we should add a counter to drvNATSend to track how
718 * deep the pipe has been filled before draining.
719 *
720 */
721 /** @todo XXX: Make this read exactly the number of bytes needed to
722 * drain the pipe. */
723 char ch;
724 size_t cbRead;
725 RTPipeRead(pThis->hPipeRead, &ch, 1, &cbRead);
726 }
727
728 /* process _all_ outstanding requests but don't wait */
729 RTReqQueueProcess(pThis->hSlirpReqQueue, 0);
730 slirpCheckTimeout(pThis);
731
732#else /* RT_OS_WINDOWS */
733 uint32_t uTimeout = 0;
734 pThis->pNATState->nsock = 0;
735 slirp_pollfds_fill(pThis->pNATState->pSlirp, &uTimeout, drvNAT_addPollCb /* SlirpAddPollCb */, pThis /* opaque */);
736 slirpUpdateTimeout(&uTimeout, pThis);
737
738 int cChangedFDs = WSAPoll(pThis->pNATState->polls, pThis->pNATState->nsock, uTimeout /* timeout */);
739 int error = WSAGetLastError();
740
741 if (cChangedFDs < 0)
742 {
743 LogFlow(("NAT: WSAPoll returned %d (error %d)\n", cChangedFDs, error));
744 LogFlow(("NSOCK = %d\n", pThis->pNATState->nsock));
745
746 if (error == 10022 /* WSAEINVAL */)
747 RTThreadSleep(100);
748 }
749
750 if (cChangedFDs == 0)
751 {
752 /* only check for slow/fast timers */
753 slirp_pollfds_poll(pThis->pNATState->pSlirp, false /*select error*/, get_revents_cb /* SlirpGetREventsCb */, pThis /* opaque */);
754 RTReqQueueProcess(pThis->hSlirpReqQueue, 0);
755 continue;
756 }
757 /* poll the sockets in any case */
758 Log2(("%s: poll\n", __FUNCTION__));
759 slirp_pollfds_poll(pThis->pNATState->pSlirp, cChangedFDs < 0 /*select error*/, get_revents_cb /* SlirpGetREventsCb */, pThis /* opaque */);
760
761 /* process _all_ outstanding requests but don't wait */
762 RTReqQueueProcess(pThis->hSlirpReqQueue, 0);
763 slirpCheckTimeout(pThis);
764# ifdef VBOX_NAT_DELAY_HACK
765 if (cBreak++ > 128)
766 {
767 cBreak = 0;
768 RTThreadSleep(2);
769 }
770# endif
771#endif /* RT_OS_WINDOWS */
772 }
773
774 return VINF_SUCCESS;
775}
776
777/**
778 * Unblock the NAT thread so it can respond to a state change.
779 *
780 * @returns VBox status code.
781 * @param pDrvIns The NAT driver instance.
782 * @param pThread The NAT thread.
783 *
784 * @thread ?
785 */
786static DECLCALLBACK(int) drvNATAsyncIoWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
787{
788 RT_NOREF(pThread);
789 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
790
791 drvNATNotifyNATThread(pThis, "drvNATAsyncIoWakeup");
792 return VINF_SUCCESS;
793}
794
795/** @todo r=jack: do we need? */
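/**
 * @callback_method_impl{FNPDMTHREADDRV}
 *
 * Host resolver thread: drains hHostResQueue until the thread is asked to
 * stop (the wait is broken by drvNATReqQueueInterrupt, see drvNATHostResWakeup).
 */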
796static DECLCALLBACK(int) drvNATHostResThread(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
797{
798 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
799
800 if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
801 return VINF_SUCCESS;
802
803 while (pThread->enmState == PDMTHREADSTATE_RUNNING)
804 {
805 RTReqQueueProcess(pThis->hHostResQueue, RT_INDEFINITE_WAIT);
806 }
807
808 return VINF_SUCCESS;
809}
810
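/**
 * Dummy request used to interrupt RTReqQueueProcess in the host resolver
 * thread; returning VINF_INTERRUPTED makes the queue processing return.
 */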
811static DECLCALLBACK(int) drvNATReqQueueInterrupt()
812{
813 /*
814 * RTReqQueueProcess loops until request returns a warning or info
815 * status code (other than VINF_SUCCESS).
816 */
817 return VINF_INTERRUPTED;
818}
819
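/**
 * @callback_method_impl{FNPDMTHREADWAKEUPDRV}
 *
 * Unblocks drvNATHostResThread by queueing a drvNATReqQueueInterrupt request.
 */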
820static DECLCALLBACK(int) drvNATHostResWakeup(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
821{
822 RT_NOREF(pThread);
823 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
824 Assert(pThis != NULL);
825
826 int rc;
827 rc = RTReqQueueCallEx(pThis->hHostResQueue, NULL /*ppReq*/, 0 /*cMillies*/,
828 RTREQFLAGS_IPRT_STATUS | RTREQFLAGS_NO_WAIT,
829 (PFNRT)drvNATReqQueueInterrupt, 0);
830 return rc;
831}
832
833/**
834 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
835 */
836static DECLCALLBACK(void *) drvNATQueryInterface(PPDMIBASE pInterface, const char *pszIID)
837{
838 PPDMDRVINS pDrvIns = PDMIBASE_2_PDMDRV(pInterface);
839 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
840
841 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pDrvIns->IBase);
842 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKUP, &pThis->INetworkUp);
843 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKNATCONFIG, &pThis->INetworkNATCfg);
844 return NULL;
845}
846
847/**
848 * Info handler.
849 */
850static DECLCALLBACK(void) drvNATInfo(PPDMDRVINS pDrvIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
851{
852 RT_NOREF(pszArgs);
853 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
854 pHlp->pfnPrintf(pHlp, "libslirp Connection Info:\n");
855 pHlp->pfnPrintf(pHlp, "%s", slirp_connection_info(pThis->pNATState->pSlirp));
856 pHlp->pfnPrintf(pHlp, "libslirp Neighbor Info:\n");
857 pHlp->pfnPrintf(pHlp, "%s", slirp_neighbor_info(pThis->pNATState->pSlirp));
858 pHlp->pfnPrintf(pHlp, "libslirp Version String: %s \n", slirp_version_string());
859}
860
861/**
862 * Sets up the redirectors.
863 *
864 * @returns VBox status code.
865 * @param pCfg The configuration handle.
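 *
 * Each child of the "PortForwarding" CFGM node describes one rule.  An
 * illustrative (hypothetical) rule looks like:
 *   PortForwarding/0/Protocol   "TCP"        ; or "UDP"; the legacy boolean "UDP" key is also accepted
 *   PortForwarding/0/HostPort   2222
 *   PortForwarding/0/GuestPort  22
 *   PortForwarding/0/BindIP     "127.0.0.1"  ; optional, defaults to INADDR_ANY
 *   PortForwarding/0/GuestIP    "10.0.2.15"  ; optional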
866 */
867static int drvNATConstructRedir(unsigned iInstance, PDRVNAT pThis, PCFGMNODE pCfg, PRTNETADDRIPV4 pNetwork)
868{
869 PPDMDRVINS pDrvIns = pThis->pDrvIns;
870 PCPDMDRVHLPR3 pHlp = pDrvIns->pHlpR3;
871
872 RT_NOREF(pNetwork); /** @todo figure why pNetwork isn't used */
873
874 PCFGMNODE pPFTree = pHlp->pfnCFGMGetChild(pCfg, "PortForwarding");
875 if (pPFTree == NULL)
876 return VINF_SUCCESS;
877
878 /*
879 * Enumerate redirections.
880 */
881 for (PCFGMNODE pNode = pHlp->pfnCFGMGetFirstChild(pPFTree); pNode; pNode = pHlp->pfnCFGMGetNextChild(pNode))
882 {
883 /*
884 * Validate the port forwarding config.
885 */
886 if (!pHlp->pfnCFGMAreValuesValid(pNode, "Name\0Protocol\0UDP\0HostPort\0GuestPort\0GuestIP\0BindIP\0"))
887 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES,
888 N_("Unknown configuration in port forwarding"));
889
890 /* protocol type */
891 bool fUDP;
892 char szProtocol[32];
893 int rc;
894 GET_STRING(rc, pDrvIns, pNode, "Protocol", szProtocol[0], sizeof(szProtocol));
895 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
896 {
897 fUDP = false;
898 GET_BOOL(rc, pDrvIns, pNode, "UDP", fUDP);
899 }
900 else if (RT_SUCCESS(rc))
901 {
902 if (!RTStrICmp(szProtocol, "TCP"))
903 fUDP = false;
904 else if (!RTStrICmp(szProtocol, "UDP"))
905 fUDP = true;
906 else
907 return PDMDrvHlpVMSetError(pDrvIns, VERR_INVALID_PARAMETER, RT_SRC_POS,
908 N_("NAT#%d: Invalid configuration value for \"Protocol\": \"%s\""),
909 iInstance, szProtocol);
910 }
911 else
912 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS,
913 N_("NAT#%d: configuration query for \"Protocol\" failed"),
914 iInstance);
915 /* host port */
916 int32_t iHostPort;
917 GET_S32_STRICT(rc, pDrvIns, pNode, "HostPort", iHostPort);
918
919 /* guest port */
920 int32_t iGuestPort;
921 GET_S32_STRICT(rc, pDrvIns, pNode, "GuestPort", iGuestPort);
922
923 /* host address ("BindIP" name is rather unfortunate given "HostPort" to go with it) */
924 struct in_addr BindIP;
925 RT_ZERO(BindIP);
926 GETIP_DEF(rc, pDrvIns, pNode, BindIP, INADDR_ANY);
927
928 /* guest address */
929 struct in_addr GuestIP;
930 RT_ZERO(GuestIP);
931 GETIP_DEF(rc, pDrvIns, pNode, GuestIP, INADDR_ANY);
932
933 /*
934 * Call slirp about it.
935 */
936 if (slirp_add_hostfwd(pThis->pNATState->pSlirp, fUDP, BindIP,
937 iHostPort, GuestIP, iGuestPort) < 0)
938 return PDMDrvHlpVMSetError(pThis->pDrvIns, VERR_NAT_REDIR_SETUP, RT_SRC_POS,
939 N_("NAT#%d: configuration error: failed to set up "
940 "redirection of %d to %d. Probably a conflict with "
941 "existing services or other rules"), iInstance, iHostPort,
942 iGuestPort);
943 } /* for each redir rule */
944
945 return VINF_SUCCESS;
946}
947
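/**
 * Worker for drvNATNetworkNatConfigRedirect: adds or removes a host port
 * forwarding rule via libslirp.
 *
 * A missing or unparsable host address falls back to INADDR_ANY; a missing
 * or unparsable guest address falls back to the current guest IP.
 *
 * @thread NAT (or the caller's thread while the NAT thread isn't running).
 */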
948static DECLCALLBACK(void) drvNATNotifyApplyPortForwardCommand(PDRVNAT pThis, bool fRemove,
949 bool fUdp, const char *pHostIp,
950 uint16_t u16HostPort, const char *pGuestIp, uint16_t u16GuestPort)
951{
952 struct in_addr guestIp, hostIp;
953
954 if ( pHostIp == NULL
955 || inet_aton(pHostIp, &hostIp) == 0)
956 hostIp.s_addr = INADDR_ANY;
957
958 if ( pGuestIp == NULL
959 || inet_aton(pGuestIp, &guestIp) == 0)
960 guestIp.s_addr = pThis->GuestIP;
961
962 if (fRemove)
963 slirp_remove_hostfwd(pThis->pNATState->pSlirp, fUdp, hostIp, u16HostPort);
964 else
965 slirp_add_hostfwd(pThis->pNATState->pSlirp, fUdp, hostIp,
966 u16HostPort, guestIp, u16GuestPort);
967}
968
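/**
 * @interface_method_impl{PDMINETWORKNATCONFIG,pfnRedirectRuleCommand}
 */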
969static DECLCALLBACK(int) drvNATNetworkNatConfigRedirect(PPDMINETWORKNATCONFIG pInterface, bool fRemove,
970 bool fUdp, const char *pHostIp, uint16_t u16HostPort,
971 const char *pGuestIp, uint16_t u16GuestPort)
972{
973 LogFlowFunc(("fRemove=%d, fUdp=%d, pHostIp=%s, u16HostPort=%u, pGuestIp=%s, u16GuestPort=%u\n",
974 RT_BOOL(fRemove), RT_BOOL(fUdp), pHostIp, u16HostPort, pGuestIp, u16GuestPort));
975 PDRVNAT pThis = RT_FROM_MEMBER(pInterface, DRVNAT, INetworkNATCfg);
976 /* Execute the command directly if the VM is not running. */
977 int rc;
978 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
979 {
980 drvNATNotifyApplyPortForwardCommand(pThis, fRemove, fUdp, pHostIp,
981 u16HostPort, pGuestIp, u16GuestPort);
982 rc = VINF_SUCCESS;
983 }
984 else
985 {
986 PRTREQ pReq;
987 rc = RTReqQueueCallEx(pThis->hSlirpReqQueue, &pReq, 0 /*cMillies*/, RTREQFLAGS_VOID,
988 (PFNRT)drvNATNotifyApplyPortForwardCommand, 7, pThis, fRemove,
989 fUdp, pHostIp, u16HostPort, pGuestIp, u16GuestPort);
990 if (rc == VERR_TIMEOUT)
991 {
992 drvNATNotifyNATThread(pThis, "drvNATNetworkNatConfigRedirect");
993 rc = RTReqWait(pReq, RT_INDEFINITE_WAIT);
994 AssertRC(rc);
995 }
996 else
997 AssertRC(rc);
998
999 RTReqRelease(pReq);
1000 }
1001 return rc;
1002}
1003
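/**
 * Lowers *uTimeout (milliseconds) to the time remaining until the earliest
 * armed slirp timer expires, so the following poll wakes up in time for it.
 *
 * @param uTimeout In/out poll timeout in milliseconds.
 * @param opaque Pointer to the DRVNAT instance.
 */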
1004static void slirpUpdateTimeout(uint32_t *uTimeout, void *opaque)
1005{
1006 PDRVNAT pThis = (PDRVNAT)opaque;
1007 Assert(pThis);
1008
1009 int64_t currTime = slirpClockGetNsCb(pThis) / (1000 * 1000);
1010 SlirpTimer *pCurrent = pThis->pNATState->pTimerHead;
1011 while (pCurrent != NULL)
1012 {
1013 if (pCurrent->uTimeExpire != -1)
1014 {
1015 int64_t diff = pCurrent->uTimeExpire - currTime;
1016
1017 if (diff < 0)
1018 diff = 0;
1019
1020 if (diff < *uTimeout)
1021 *uTimeout = diff;
1022 }
1023
1024 pCurrent = pCurrent->next;
1025 }
1026}
1027
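/**
 * Runs all expired slirp timers: each timer whose deadline has passed is
 * disarmed and its handler is invoked.
 *
 * @param opaque Pointer to the DRVNAT instance.
 */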
1028static void slirpCheckTimeout(void *opaque)
1029{
1030 PDRVNAT pThis = (PDRVNAT)opaque;
1031 Assert(pThis);
1032
1033 int64_t currTime = slirpClockGetNsCb(pThis) / (1000 * 1000);
1034 SlirpTimer *pCurrent = pThis->pNATState->pTimerHead;
1035 while (pCurrent != NULL)
1036 {
1037 if (pCurrent->uTimeExpire != -1)
1038 {
1039 int64_t diff = pCurrent->uTimeExpire - currTime;
1040 if (diff <= 0)
1041 {
1042 pCurrent->uTimeExpire = -1;
1043 pCurrent->pHandler(pCurrent->opaque);
1044 }
1045 }
1046
1047 pCurrent = pCurrent->next;
1048 }
1049}
1050
1051/**
1052 * CALLBACKS
1053 */
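/**
 * Callback (SlirpCb::send_packet) used by libslirp to deliver a frame to
 * the guest: the frame is copied and queued on hRecvReqQueue for the
 * receive thread (drvNATRecvWorker).
 *
 * @returns Number of bytes queued, or -1 if the NAT thread is stopping.
 */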
1054static DECLCALLBACK(ssize_t) slirpSendPacketCb(const void *pBuf, size_t cb, void *opaque /* PDRVNAT */)
1055{
1056 char *pNewBuf = (char *)RTMemDup(pBuf, cb);
1057 if (RT_UNLIKELY(!pNewBuf)) return -1; /* drop the frame on allocation failure */
1058
1059 PDRVNAT pThis = (PDRVNAT)opaque;
1060 Assert(pThis);
1061
1062 LogFlow(("slirp_output BEGIN %p %d\n", pNewBuf, cb));
1063 Log6(("slirp_output: pu8Buf=%p cb=%#x (pThis=%p)\n%.*Rhxd\n", pNewBuf, cb, pThis, (int)cb, pNewBuf));
1064
1065 /* don't queue new requests when the NAT thread is about to stop */
1066 if (pThis->pSlirpThread->enmState != PDMTHREADSTATE_RUNNING)
1067 return -1;
1068
1069 ASMAtomicIncU32(&pThis->cPkts);
1070 int rc = RTReqQueueCallEx(pThis->hRecvReqQueue, NULL /*ppReq*/, 0 /*cMillies*/, RTREQFLAGS_VOID | RTREQFLAGS_NO_WAIT,
1071 (PFNRT)drvNATRecvWorker, 3, pThis, pNewBuf, cb);
1072 AssertRC(rc);
1073 drvNATRecvWakeup(pThis->pDrvIns, pThis->pRecvThread);
1074 drvNATNotifyNATThread(pThis, "slirpSendPacketCb");
1075 STAM_COUNTER_INC(&pThis->StatQueuePktSent);
1076 LogFlowFuncLeave();
1077 return cb;
1078}
1079
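/**
 * Callback (SlirpCb::guest_error) used by libslirp to report a guest error;
 * records a driver error and logs the message.
 */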
1080static DECLCALLBACK(void) slirpGuestErrorCb(const char *pMsg, void *opaque)
1081{
1082 PDRVNAT pThis = (PDRVNAT)opaque;
1083 Assert(pThis);
1084
1085 PDMDRV_SET_ERROR(pThis->pDrvIns, VERR_PDM_UNKNOWN_DRVREG_VERSION,
1086 N_("Unknown error: "));
1087 LogRel(("NAT: error from libslirp: %s\n", pMsg));
1088}
1089
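/**
 * Callback (SlirpCb::clock_get_ns) used by libslirp to query the current
 * monotonic time in nanoseconds.
 */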
1090static DECLCALLBACK(int64_t) slirpClockGetNsCb(void *opaque)
1091{
1092 PDRVNAT pThis = (PDRVNAT)opaque;
1093 Assert(pThis);
1094
1095 return (int64_t)RTTimeNanoTS();
1096}
1097
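/**
 * Callback (SlirpCb::timer_new) used by libslirp to create a timer.  The
 * timer is prepended to the driver's timer list and starts out disarmed
 * (uTimeExpire == -1).
 */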
1098static DECLCALLBACK(void *) slirpTimerNewCb(SlirpTimerCb slirpTimeCb, void *cb_opaque, void *opaque)
1099{
1100 PDRVNAT pThis = (PDRVNAT)opaque;
1101 Assert(pThis);
1102
1103 SlirpTimer *pNewTimer = (SlirpTimer *)RTMemAlloc(sizeof(SlirpTimer));
1104 if (!pNewTimer)
1105 return NULL;
1106
1107 pNewTimer->next = pThis->pNATState->pTimerHead;
1108 pNewTimer->uTimeExpire = -1;
1109 pNewTimer->pHandler = slirpTimeCb;
1110 pNewTimer->opaque = cb_opaque;
1111 pThis->pNATState->pTimerHead = pNewTimer;
1112
1113 return pNewTimer;
1114}
1115
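/**
 * Callback (SlirpCb::timer_free) used by libslirp to destroy a timer
 * created by slirpTimerNewCb.
 */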
1116static DECLCALLBACK(void) slirpTimerFreeCb(void *pTimer, void *opaque)
1117{
1118 PDRVNAT pThis = (PDRVNAT)opaque;
1119 Assert(pThis);
1120 /* Find the timer, unlink it from the list, and free it. */
1121 SlirpTimer **ppPrev = &pThis->pNATState->pTimerHead;
1122 while (*ppPrev != NULL)
1123 {
1124 if (*ppPrev == (SlirpTimer *)pTimer)
1125 {
1126 SlirpTimer *pFree = *ppPrev;
1127 *ppPrev = pFree->next;
1128 RTMemFree(pFree);
1129 return;
1130 }
1131 ppPrev = &(*ppPrev)->next;
1132 }
1133}
1134
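/**
 * Callback (SlirpCb::timer_mod) used by libslirp to (re-)arm a timer.
 *
 * @param expireTime New absolute deadline in milliseconds, on the same
 * clock as slirpClockGetNsCb() / 1000000.
 */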
1135static DECLCALLBACK(void) slirpTimerModCb(void *pTimer, int64_t expireTime, void *opaque)
1136{
1137 PDRVNAT pThis = (PDRVNAT)opaque;
1138 Assert(pThis);
1139
1140 ((SlirpTimer *)pTimer)->uTimeExpire = expireTime;
1141}
1142
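/**
 * Callback (SlirpCb::notify) used by libslirp to kick the NAT thread out of
 * its poll so new work is picked up.
 */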
1143static DECLCALLBACK(void) slirpNotifyCb(void *opaque)
1144{
1145 PDRVNAT pThis = (PDRVNAT)opaque;
1146
1147 drvNATAsyncIoWakeup(pThis->pDrvIns, NULL);
1148}
1149
1150/**
1151 * Destruct a driver instance.
1152 *
1153 * Most VM resources are freed by the VM. This callback is provided so that any non-VM
1154 * resources can be freed correctly.
1155 *
1156 * @param pDrvIns The driver instance data.
1157 */
1158static DECLCALLBACK(void) drvNATDestruct(PPDMDRVINS pDrvIns)
1159{
1160 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1161 LogFlow(("drvNATDestruct:\n"));
1162 PDMDRV_CHECK_VERSIONS_RETURN_VOID(pDrvIns);
1163
1164 if (pThis->pNATState)
1165 {
1166 slirp_cleanup(pThis->pNATState->pSlirp);
1167#ifdef VBOX_WITH_STATISTICS
1168# define DRV_PROFILE_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pThis)
1169# define DRV_COUNTING_COUNTER(name, dsc) DEREGISTER_COUNTER(name, pThis)
1170# include "slirp/counters.h"
1171#endif
1172 pThis->pNATState = NULL;
1173 }
1174
1175 RTReqQueueDestroy(pThis->hHostResQueue);
1176 pThis->hHostResQueue = NIL_RTREQQUEUE;
1177
1178 RTReqQueueDestroy(pThis->hSlirpReqQueue);
1179 pThis->hSlirpReqQueue = NIL_RTREQQUEUE;
1180
1181 RTReqQueueDestroy(pThis->hUrgRecvReqQueue);
1182 pThis->hUrgRecvReqQueue = NIL_RTREQQUEUE;
1183
1184 RTReqQueueDestroy(pThis->hRecvReqQueue);
1185 pThis->hRecvReqQueue = NIL_RTREQQUEUE;
1186
1187 RTSemEventDestroy(pThis->EventRecv);
1188 pThis->EventRecv = NIL_RTSEMEVENT;
1189
1190 RTSemEventDestroy(pThis->EventUrgRecv);
1191 pThis->EventUrgRecv = NIL_RTSEMEVENT;
1192
1193 if (RTCritSectIsInitialized(&pThis->DevAccessLock))
1194 RTCritSectDelete(&pThis->DevAccessLock);
1195
1196 if (RTCritSectIsInitialized(&pThis->XmitLock))
1197 RTCritSectDelete(&pThis->XmitLock);
1198
1199#ifndef RT_OS_WINDOWS
1200 RTPipeClose(pThis->hPipeRead);
1201 RTPipeClose(pThis->hPipeWrite);
1202#endif
1203
1204#ifdef RT_OS_DARWIN
1205 /* Cleanup the DNS watcher. */
1206 if (pThis->hRunLoopSrcDnsWatcher != NULL)
1207 {
1208 CFRunLoopRef hRunLoopMain = CFRunLoopGetMain();
1209 CFRetain(hRunLoopMain);
1210 CFRunLoopRemoveSource(hRunLoopMain, pThis->hRunLoopSrcDnsWatcher, kCFRunLoopCommonModes);
1211 CFRelease(hRunLoopMain);
1212 CFRelease(pThis->hRunLoopSrcDnsWatcher);
1213 pThis->hRunLoopSrcDnsWatcher = NULL;
1214 }
1215#endif
1216}
1217
1218/**
1219 * Construct a NAT network transport driver instance.
1220 *
1221 * @copydoc FNPDMDRVCONSTRUCT
1222 */
1223static DECLCALLBACK(int) drvNATConstruct(PPDMDRVINS pDrvIns, PCFGMNODE pCfg, uint32_t fFlags)
1224{
1225 int rc = 0;
1226
1227 /* Construct PDRVNAT */
1228
1229 RT_NOREF(fFlags);
1230 PDMDRV_CHECK_VERSIONS_RETURN(pDrvIns);
1231 PDRVNAT pThis = PDMINS_2_DATA(pDrvIns, PDRVNAT);
1232
1233 /*
1234 * Init the static parts.
1235 */
1236 pThis->pDrvIns = pDrvIns;
1237 pThis->pNATState = (SlirpState *)RTMemAlloc(sizeof(SlirpState));
1238 if (pThis->pNATState == NULL)
1239 {
1240 return VERR_NO_MEMORY;
1241 }
1242 else
1243 {
1244 pThis->pNATState->nsock = 0;
1245 pThis->pNATState->pTimerHead = NULL;
1246 pThis->pNATState->polls = (struct pollfd *)RTMemAlloc(64 * sizeof(struct pollfd));
1247 pThis->pNATState->uPollCap = 64;
1248 }
1249 pThis->hSlirpReqQueue = NIL_RTREQQUEUE;
1250 pThis->hUrgRecvReqQueue = NIL_RTREQQUEUE;
1251 pThis->hHostResQueue = NIL_RTREQQUEUE;
1252 pThis->EventRecv = NIL_RTSEMEVENT;
1253 pThis->EventUrgRecv = NIL_RTSEMEVENT;
1254#ifdef RT_OS_DARWIN
1255 pThis->hRunLoopSrcDnsWatcher = NULL;
1256#endif
1257
1258 /* IBase */
1259 pDrvIns->IBase.pfnQueryInterface = drvNATQueryInterface;
1260
1261 /* INetwork */
1262 pThis->INetworkUp.pfnBeginXmit = drvNATNetworkUp_BeginXmit;
1263 pThis->INetworkUp.pfnAllocBuf = drvNATNetworkUp_AllocBuf;
1264 pThis->INetworkUp.pfnFreeBuf = drvNATNetworkUp_FreeBuf;
1265 pThis->INetworkUp.pfnSendBuf = drvNATNetworkUp_SendBuf;
1266 pThis->INetworkUp.pfnEndXmit = drvNATNetworkUp_EndXmit;
1267 pThis->INetworkUp.pfnSetPromiscuousMode = drvNATNetworkUp_SetPromiscuousMode;
1268 pThis->INetworkUp.pfnNotifyLinkChanged = drvNATNetworkUp_NotifyLinkChanged;
1269
1270 /* NAT engine configuration */
1271 pThis->INetworkNATCfg.pfnRedirectRuleCommand = drvNATNetworkNatConfigRedirect;
1272 pThis->INetworkNATCfg.pfnNotifyDnsChanged = NULL;
1273
1274 /*
1275 * Validate the config.
1276 */
1277 PDMDRV_VALIDATE_CONFIG_RETURN(pDrvIns,
1278 "PassDomain"
1279 "|TFTPPrefix"
1280 "|BootFile"
1281 "|Network"
1282 "|NextServer"
1283 "|DNSProxy"
1284 "|BindIP"
1285 "|UseHostResolver"
1286 "|SlirpMTU"
1287 "|AliasMode"
1288 "|SockRcv"
1289 "|SockSnd"
1290 "|TcpRcv"
1291 "|TcpSnd"
1292 "|ICMPCacheLimit"
1293 "|SoMaxConnection"
1294 "|LocalhostReachable"
1295 "|HostResolverMappings"
1296 , "PortForwarding");
1297
1298 /*
1299 * Get the configuration settings.
1300 */
1301 bool fPassDomain = true;
1302 GET_BOOL(rc, pDrvIns, pCfg, "PassDomain", fPassDomain);
1303
1304 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "TFTPPrefix", pThis->pszTFTPPrefix);
1305 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "BootFile", pThis->pszBootFile);
1306 GET_STRING_ALLOC(rc, pDrvIns, pCfg, "NextServer", pThis->pszNextServer);
1307
1308 int fDNSProxy = 0;
1309 GET_S32(rc, pDrvIns, pCfg, "DNSProxy", fDNSProxy);
1310 int fUseHostResolver = 0;
1311 GET_S32(rc, pDrvIns, pCfg, "UseHostResolver", fUseHostResolver);
1312 int MTU = 1500;
1313 GET_S32(rc, pDrvIns, pCfg, "SlirpMTU", MTU);
1314 int i32AliasMode = 0;
1315 int i32MainAliasMode = 0;
1316 GET_S32(rc, pDrvIns, pCfg, "AliasMode", i32MainAliasMode);
1317 int iIcmpCacheLimit = 100;
1318 GET_S32(rc, pDrvIns, pCfg, "ICMPCacheLimit", iIcmpCacheLimit);
1319 bool fLocalhostReachable = false;
1320 GET_BOOL(rc, pDrvIns, pCfg, "LocalhostReachable", fLocalhostReachable);
1321
1322 i32AliasMode |= (i32MainAliasMode & 0x1 ? 0x1 : 0);
1323 i32AliasMode |= (i32MainAliasMode & 0x2 ? 0x40 : 0);
1324 i32AliasMode |= (i32MainAliasMode & 0x4 ? 0x4 : 0);
1325 int i32SoMaxConn = 10;
1326 GET_S32(rc, pDrvIns, pCfg, "SoMaxConnection", i32SoMaxConn);
1327 /*
1328 * Query the network port interface.
1329 */
1330 pThis->pIAboveNet = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMINETWORKDOWN);
1331 if (!pThis->pIAboveNet)
1332 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_ABOVE,
1333 N_("Configuration error: the above device/driver didn't "
1334 "export the network port interface"));
1335 pThis->pIAboveConfig = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMINETWORKCONFIG);
1336 if (!pThis->pIAboveConfig)
1337 return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_ABOVE,
1338 N_("Configuration error: the above device/driver didn't "
1339 "export the network config interface"));
1340
1341 /* Generate a network address for this network card. */
1342 char szNetwork[32]; /* xxx.xxx.xxx.xxx/yy */
1343 GET_STRING(rc, pDrvIns, pCfg, "Network", szNetwork[0], sizeof(szNetwork));
1344 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1345 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS, N_("NAT%d: Configuration error: missing network"),
1346 pDrvIns->iInstance);
1347
1348 RTNETADDRIPV4 Network, Netmask;
1349
1350 rc = RTCidrStrToIPv4(szNetwork, &Network, &Netmask);
1351 if (RT_FAILURE(rc))
1352 return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS,
1353 N_("NAT#%d: Configuration error: network '%s' does not specify a valid IPv4 network"),
1354 pDrvIns->iInstance, szNetwork);
1355
1356 /* Construct the libslirp config and initialize slirp. */
1357
1358 LogFlow(("Network settings from the VBox configuration:\n"
1359 " Network: %#RX32\n"
1360 " Netmask: %#RX32\n", Network.u, Netmask.u));
1361
1362#ifndef RT_OS_WINDOWS
1363 struct in_addr vnetwork = RTNetIPv4AddrHEToInAddr(&Network);
1364 struct in_addr vnetmask = RTNetIPv4AddrHEToInAddr(&Netmask);
1365 struct in_addr vhost = RTNetInAddrFromU8(10, 0, 2, 2);
1366 struct in_addr vdhcp_start = RTNetInAddrFromU8(10, 0, 2, 15);
1367 struct in_addr vnameserver = RTNetInAddrFromU8(10, 0, 2, 3);
1368#else
1369 struct in_addr vnetwork;
1370 vnetwork.S_un.S_addr = RT_BSWAP_U32(Network.u);
1371
1372 struct in_addr vnetmask;
1373 vnetmask.S_un.S_addr = RT_BSWAP_U32(Netmask.u);
1374
1375 struct in_addr vhost;
1376 vhost.S_un.S_addr = RT_BSWAP_U32(0x0a000202);
1377
1378 struct in_addr vdhcp_start;
1379 vdhcp_start.S_un.S_addr = RT_BSWAP_U32(0x0a00020f);
1380
1381 struct in_addr vnameserver;
1382 vnameserver.S_un.S_addr = RT_BSWAP_U32(0x0a000203);
1383#endif
1384
1385 SlirpConfig *pSlirpCfg = new SlirpConfig { 0 };
1386
1387 pSlirpCfg->version = 4;
1388 pSlirpCfg->restricted = false;
1389 pSlirpCfg->in_enabled = true;
1390 pSlirpCfg->vnetwork = vnetwork;
1391 pSlirpCfg->vnetmask = vnetmask;
1392 pSlirpCfg->vhost = vhost;
1393 pSlirpCfg->in6_enabled = true;
1394
1395 inet_pton(AF_INET6, "fd00::", &pSlirpCfg->vprefix_addr6);
1396 pSlirpCfg->vprefix_len = 64;
1397 inet_pton(AF_INET6, "fd00::2", &pSlirpCfg->vhost6);
1398
1399 pSlirpCfg->vhostname = "vbox";
1400 pSlirpCfg->tftp_server_name = pThis->pszNextServer;
1401 pSlirpCfg->tftp_path = pThis->pszTFTPPrefix;
1402 pSlirpCfg->bootfile = pThis->pszBootFile;
1403 pSlirpCfg->vdhcp_start = vdhcp_start;
1404 pSlirpCfg->vnameserver = vnameserver;
1405
1406#ifndef RT_OS_WINDOWS
1407 inet_pton(AF_INET6, "fd00::3", &pSlirpCfg->vnameserver6);
1408#else
1409 inet_pton(23 /* AF_INET6 on Windows */, "fd00::3", &pSlirpCfg->vnameserver6);
1410#endif
1411
1412 pSlirpCfg->vdnssearch = NULL;
1413 pSlirpCfg->vdomainname = NULL;
1414
1415 SlirpCb *slirpCallbacks = (struct SlirpCb *)RTMemAlloc(sizeof(SlirpCb));
1416
1417 slirpCallbacks->send_packet = &slirpSendPacketCb;
1418 slirpCallbacks->guest_error = &slirpGuestErrorCb;
1419 slirpCallbacks->clock_get_ns = &slirpClockGetNsCb;
1420 slirpCallbacks->timer_new = &slirpTimerNewCb;
1421 slirpCallbacks->timer_free = &slirpTimerFreeCb;
1422 slirpCallbacks->timer_mod = &slirpTimerModCb;
1423 slirpCallbacks->register_poll_fd = &drvNAT_RegisterPoll;
1424 slirpCallbacks->unregister_poll_fd = &drvNAT_UnregisterPoll;
1425 slirpCallbacks->notify = &slirpNotifyCb;
1426 slirpCallbacks->init_completed = NULL;
1427 slirpCallbacks->timer_new_opaque = NULL;
1428
1429 Slirp *pSlirp = slirp_new(/* cfg */ pSlirpCfg, /* callbacks */ slirpCallbacks, /* opaque */ pThis);
1430
1431 if (pSlirp == NULL)
1432 return VERR_INVALID_POINTER;
1433
1434 pThis->pNATState->pSlirp = pSlirp;
1435
1436 // pThis->pNATState->polls = NULL;
1437
1438 rc = drvNATConstructRedir(pDrvIns->iInstance, pThis, pCfg, &Network);
1439 AssertLogRelRCReturn(rc, rc);
1440
1441 rc = PDMDrvHlpSSMRegisterLoadDone(pDrvIns, NULL);
1442 AssertLogRelRCReturn(rc, rc);
1443
1444 rc = RTReqQueueCreate(&pThis->hSlirpReqQueue);
1445 AssertLogRelRCReturn(rc, rc);
1446
1447 rc = RTReqQueueCreate(&pThis->hRecvReqQueue);
1448 AssertLogRelRCReturn(rc, rc);
1449
1450 rc = RTReqQueueCreate(&pThis->hUrgRecvReqQueue);
1451 AssertLogRelRCReturn(rc, rc);
1452
1453 rc = PDMDrvHlpThreadCreate(pDrvIns, &pThis->pRecvThread, pThis, drvNATRecv,
1454 drvNATRecvWakeup, 256 * _1K, RTTHREADTYPE_IO, "NATRX");
1455 AssertRCReturn(rc, rc);
1456
1457 rc = RTSemEventCreate(&pThis->EventRecv);
1458 AssertRCReturn(rc, rc);
1459
1460 rc = RTSemEventCreate(&pThis->EventUrgRecv);
1461 AssertRCReturn(rc, rc);
1462
1463 rc = PDMDrvHlpThreadCreate(pDrvIns, &pThis->pUrgRecvThread, pThis, drvNATUrgRecv,
1464 drvNATUrgRecvWakeup, 256 * _1K, RTTHREADTYPE_IO, "NATURGRX");
1465 AssertRCReturn(rc, rc);
1466
1467 rc = RTReqQueueCreate(&pThis->hHostResQueue);
1468 AssertRCReturn(rc, rc);
1469
1470 rc = PDMDrvHlpThreadCreate(pThis->pDrvIns, &pThis->pHostResThread,
1471 pThis, drvNATHostResThread, drvNATHostResWakeup,
1472 128 * _1K, RTTHREADTYPE_IO, "HOSTRES");
1473 AssertRCReturn(rc, rc);
1474
1475 rc = RTCritSectInit(&pThis->DevAccessLock);
1476 AssertRCReturn(rc, rc);
1477
1478 rc = RTCritSectInit(&pThis->XmitLock);
1479 AssertRCReturn(rc, rc);
1480
1481 char szTmp[128];
1482 RTStrPrintf(szTmp, sizeof(szTmp), "nat%d", pDrvIns->iInstance);
1483 PDMDrvHlpDBGFInfoRegister(pDrvIns, szTmp, "NAT info.", drvNATInfo);
1484
1485#ifdef VBOX_WITH_STATISTICS
1486# define DRV_PROFILE_COUNTER(name, dsc) REGISTER_COUNTER(name, pThis, STAMTYPE_PROFILE, STAMUNIT_TICKS_PER_CALL, dsc)
1487# define DRV_COUNTING_COUNTER(name, dsc) REGISTER_COUNTER(name, pThis, STAMTYPE_COUNTER, STAMUNIT_COUNT, dsc)
1488# include "slirp/counters.h"
1489#endif
1490
1491#ifndef RT_OS_WINDOWS
1492 /*
1493 * Create the control pipe.
1494 */
1495 rc = RTPipeCreate(&pThis->hPipeRead, &pThis->hPipeWrite, 0 /*fFlags*/);
1496 AssertRCReturn(rc, rc);
1497#else
1498 pThis->hWakeupEvent = CreateEvent(NULL, FALSE, FALSE, NULL); /* auto-reset event */
1499 pThis->pNATState->phEvents[VBOX_WAKEUP_EVENT_INDEX] = pThis->hWakeupEvent;
1500 pThis->pNATState->phEvents[VBOX_SOCKET_EVENT_INDEX] = CreateEvent(NULL, FALSE, FALSE, NULL);
1501#endif
1502
1503 rc = PDMDrvHlpThreadCreate(pDrvIns, &pThis->pSlirpThread, pThis, drvNATAsyncIoThread,
1504 drvNATAsyncIoWakeup, 256 * _1K, RTTHREADTYPE_IO, "NAT");
1505 AssertRCReturn(rc, rc);
1506
1507 pThis->enmLinkState = pThis->enmLinkStateWant = PDMNETWORKLINKSTATE_UP;
1508
1509 return rc;
1510}
1511
1512/**
1513 * NAT network transport driver registration record.
1514 */
1515const PDMDRVREG g_DrvNATlibslirp =
1516{
1517 /* u32Version */
1518 PDM_DRVREG_VERSION,
1519 /* szName */
1520 "NAT",
1521 /* szRCMod */
1522 "",
1523 /* szR0Mod */
1524 "",
1525 /* pszDescription */
1526 "NATlibslrip Network Transport Driver",
1527 /* fFlags */
1528 PDM_DRVREG_FLAGS_HOST_BITS_DEFAULT,
1529 /* fClass. */
1530 PDM_DRVREG_CLASS_NETWORK,
1531 /* cMaxInstances */
1532 ~0U,
1533 /* cbInstance */
1534 sizeof(DRVNAT),
1535 /* pfnConstruct */
1536 drvNATConstruct,
1537 /* pfnDestruct */
1538 drvNATDestruct,
1539 /* pfnRelocate */
1540 NULL,
1541 /* pfnIOCtl */
1542 NULL,
1543 /* pfnPowerOn */
1544 NULL,
1545 /* pfnReset */
1546 NULL,
1547 /* pfnSuspend */
1548 NULL,
1549 /* pfnResume */
1550 NULL,
1551 /* pfnAttach */
1552 NULL,
1553 /* pfnDetach */
1554 NULL,
1555 /* pfnPowerOff */
1556 NULL,
1557 /* pfnSoftReset */
1558 NULL,
1559 /* u32EndVersion */
1560 PDM_DRVREG_VERSION
1561};