VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@14506

Last change on this file since 14506 was 14506, checked in by vboxsync, 16 years ago

Linux hostif: Added packet segmentation for host-to-guest transfer.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 32.0 KB
/* $Id: VBoxNetFlt-linux.c 14506 2008-11-24 08:46:58Z vboxsync $ */
/** @file
 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
 */

/*
 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
 *
 * Sun Microsystems, Inc. confidential
 * All rights reserved
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-linux-kernel.h"
#include "version-generated.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>

#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/alloca.h>
#include <iprt/assert.h>
#include <iprt/spinlock.h>
#include <iprt/semaphore.h>
#include <iprt/initterm.h>
#include <iprt/process.h>
#include <iprt/mem.h>
#include <iprt/log.h>
#include <iprt/mp.h>
#include <iprt/mem.h>
#include <iprt/time.h>

#define VBOXNETFLT_OS_SPECFIC 1
#include "../VBoxNetFltInternal.h"

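/*
 * Helpers for converting a pointer to a structure embedded in the instance
 * data (the notifier block, the packet type registration or the xmit work
 * item) back to the owning VBOXNETFLTINS, container_of style.
 */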
#define VBOX_FLT_NB_TO_INST(pNB) ((PVBOXNETFLTINS)((uint8_t *)pNB - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.Notifier)))
#define VBOX_FLT_PT_TO_INST(pPT) ((PVBOXNETFLTINS)((uint8_t *)pPT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.PacketType)))
#define VBOX_FLT_XT_TO_INST(pXT) ((PVBOXNETFLTINS)((uint8_t *)pXT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.XmitTask)))

#define VBOX_GET_PCOUNT(pDev) (pDev->promiscuity)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
# define VBOX_SKB_TRANSPORT_HDR(skb) skb->transport_header
# define VBOX_SKB_NETWORK_HDR(skb) skb->network_header
# define VBOX_SKB_MAC_HDR(skb) skb->mac_header
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
# define VBOX_SKB_TRANSPORT_HDR(skb) skb->h.raw
# define VBOX_SKB_NETWORK_HDR(skb) skb->nh.raw
# define VBOX_SKB_MAC_HDR(skb) skb->mac.raw
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int VBoxNetFltLinuxInit(void);
static void VBoxNetFltLinuxUnload(void);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/**
 * The (common) global data.
 */
static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;

module_init(VBoxNetFltLinuxInit);
module_exit(VBoxNetFltLinuxUnload);

MODULE_AUTHOR("Sun Microsystems, Inc.");
MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
MODULE_LICENSE("GPL");
#ifdef MODULE_VERSION
# define xstr(s) str(s)
# define str(s) #s
MODULE_VERSION(VBOX_VERSION_STRING " (" xstr(INTNETTRUNKIFPORT_VERSION) ")");
#endif

/**
 * The (common) global data.
 */
static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;

/**
 * Initialize module.
 *
 * @returns appropriate status code.
 */
static int __init VBoxNetFltLinuxInit(void)
{
    int rc;
    Log(("VBoxNetFltLinuxInit\n"));

    /*
     * Initialize IPRT.
     */
    rc = RTR0Init(0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the globals and connect to the support driver.
         *
         * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
         * for establishing the connection to the support driver.
         */
        memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
        rc = vboxNetFltInitGlobals(&g_VBoxNetFltGlobals);
        if (RT_SUCCESS(rc))
        {
            LogRel(("VBoxNetFlt: Successfully started.\n"));
            return 0;
        }

        LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
        RTR0Term();
    }
    else
        LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));

    memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
    return RTErrConvertToErrno(rc);
}


/**
 * Unload the module.
 *
 * @todo We have to prevent this if we're busy!
 */
static void __exit VBoxNetFltLinuxUnload(void)
{
    int rc;
    Log(("VBoxNetFltLinuxUnload\n"));
    Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));

    /*
     * Undo the work done during start (in reverse order).
     */
    rc = vboxNetFltTryDeleteGlobals(&g_VBoxNetFltGlobals);
    AssertRC(rc); NOREF(rc);

    RTR0Term();

    memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));

    Log(("VBoxNetFltLinuxUnload - done\n"));
}

/**
 * Reads and retains the host interface handle.
 *
 * @returns The handle, NULL if detached.
 * @param   pThis       The instance.
 */
DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
{
#if 0
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    struct net_device *pDev = NULL;

    Log(("vboxNetFltLinuxRetainNetDev\n"));
    /*
     * Be careful here to avoid problems racing the detached callback.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
    {
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev)
        {
            dev_hold(pDev);
            Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        }
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxRetainNetDev - done\n"));
    return pDev;
#else
    return (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
#endif
}


/**
 * Release the host interface handle previously retained
 * by vboxNetFltLinuxRetainNetDev.
 *
 * @param   pThis       The instance.
 * @param   pDev        The vboxNetFltLinuxRetainNetDev
 *                      return value, NULL is fine.
 */
DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
#if 0
    Log(("vboxNetFltLinuxReleaseNetDev\n"));
    NOREF(pThis);
    if (pDev)
    {
        dev_put(pDev);
        Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }
    Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
#endif
}

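/*
 * Tag placed in the sk_buff control buffer (skb->cb) of packets we inject
 * ourselves, so that vboxNetFltLinuxPacketHandler can recognize and drop
 * them instead of feeding them back into the internal network.
 */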
#define VBOXNETFLT_CB_TAG 0xA1C9D7C3
#define VBOXNETFLT_SKB_CB(skb) (*(uint32_t*)&((skb)->cb[0]))

/**
 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
 *
 * @returns true / false accordingly.
 * @param   pBuf        The sk_buff.
 */
DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
{
    return VBOXNETFLT_SKB_CB(pBuf) == VBOXNETFLT_CB_TAG;
}


/**
 * Internal worker that creates a Linux sk_buff for a
 * (scatter/)gather list.
 *
 * @returns Pointer to the sk_buff.
 * @param   pThis       The instance.
 * @param   pSG         The (scatter/)gather list.
 * @param   fDstWire    Set if the destination is the wire.
 */
static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
{
    struct sk_buff *pPkt;
    struct net_device *pDev;
    /*
     * Because we're lazy, we will ASSUME that all SGs coming from INTNET
     * will only contain one single segment.
     */
    if (pSG->cSegsUsed != 1 || pSG->cbTotal != pSG->aSegs[0].cb)
    {
        LogRel(("VBoxNetFlt: Dropped multi-segment(%d) packet coming from internal network.\n", pSG->cSegsUsed));
        return NULL;
    }
    if (pSG->cbTotal == 0)
    {
        LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
        return NULL;
    }

    /*
     * Allocate a packet and copy over the data.
     */
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
    if (pPkt)
    {
        pPkt->dev = pDev;
        /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
        skb_reserve(pPkt, NET_IP_ALIGN);
        skb_put(pPkt, pSG->cbTotal);
        memcpy(pPkt->data, pSG->aSegs[0].pv, pSG->cbTotal);
        /* Set protocol and packet_type fields. */
        pPkt->protocol = eth_type_trans(pPkt, pDev);
        pPkt->ip_summed = CHECKSUM_NONE;
        if (fDstWire)
        {
            VBOX_SKB_NETWORK_HDR(pPkt) = pPkt->data;
            /* Restore ethernet header back. */
            skb_push(pPkt, ETH_HLEN);
        }
        VBOXNETFLT_SKB_CB(pPkt) = VBOXNETFLT_CB_TAG;

        return pPkt;
    }
    else
        Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
    pSG->pvUserData = NULL;

    return NULL;
}


/**
 * Initializes a SG list from an sk_buff.
 *
 * @param   pThis       The instance.
 * @param   pBuf        The sk_buff.
 * @param   pSG         The SG.
 * @param   cSegs       The number of segments allocated for the SG.
 *                      This should match the number in the sk_buff exactly!
 * @param   fSrc        The source of the frame.
 */
DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
{
    int i;
    NOREF(pThis);

    Assert(!skb_shinfo(pBuf)->frag_list);
    pSG->pvOwnerData = NULL;
    pSG->pvUserData = NULL;
    pSG->pvUserData2 = NULL;
    pSG->cUsers = 1;
    pSG->fFlags = INTNETSG_FLAGS_TEMP;
    pSG->cSegsAlloc = cSegs;

    if (fSrc & INTNETTRUNKDIR_WIRE)
    {
        /*
         * The packet came from the wire and the ethernet header was removed
         * by the device driver. Restore it.
         */
        skb_push(pBuf, ETH_HLEN);
    }
    pSG->cbTotal = pBuf->len;
#ifdef VBOXNETFLT_SG_SUPPORT
    pSG->aSegs[0].cb = skb_headlen(pBuf);
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;

    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
        pSG->aSegs[i+1].cb = pFrag->size;
        pSG->aSegs[i+1].pv = kmap(pFrag->page);
        printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
        pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
    }
    pSG->cSegsUsed = ++i;
#else
    pSG->aSegs[0].cb = pBuf->len;
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;
    pSG->cSegsUsed = i = 1;
#endif


#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     *
     * Since we're getting to the packet before it is framed, it has not
     * yet been padded. The current solution is to add a segment pointing
     * to a buffer containing all zeros and pray that works for all frames...
     */
    if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
    {
        static uint8_t const s_abZero[128] = {0};

        AssertReturnVoid(i < cSegs);

        pSG->aSegs[i].Phys = NIL_RTHCPHYS;
        pSG->aSegs[i].pv = (void *)&s_abZero[0];
        pSG->aSegs[i].cb = 60 - pSG->cbTotal;
        pSG->cbTotal = 60;
        pSG->cSegsUsed++;
    }
#endif
    Log2(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
          pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
    for (i = 0; i < pSG->cSegsUsed; i++)
        Log2(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
              i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
}

/**
 * Packet handler registered with dev_add_pack; called for every packet seen
 * on the host interface.
 *
 * @returns 0 (the return value is ignored by the kernel).
 * @param   pBuf            The sk_buff.
 * @param   pSkbDev         The device the packet arrived on.
 * @param   pPacketType     Our packet type registration; identifies the instance.
 * @param   pOrigDev        The original device (not used).
 */
static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
                                        struct net_device *pSkbDev,
                                        struct packet_type *pPacketType,
                                        struct net_device *pOrigDev)
{
    PVBOXNETFLTINS pThis;
    struct net_device *pDev;
    /*
     * Drop it immediately?
     */
    Log2(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p pOrigDev=%p\n",
          pBuf, pSkbDev, pPacketType, pOrigDev));
    if (!pBuf)
        return 0;
    pThis = VBOX_FLT_PT_TO_INST(pPacketType);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pThis->u.s.pDev != pSkbDev)
    {
        Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
        return 0;
    }

    if (vboxNetFltLinuxSkBufIsOur(pBuf))
    {
        dev_kfree_skb(pBuf);
        return 0;
    }

    /* Add the packet to the transmit queue and schedule the bottom half. */
    skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
    schedule_work(&pThis->u.s.XmitTask);
    Log2(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
          &pThis->u.s.XmitTask, pBuf));
    /* It does not really matter what we return, it is ignored by the kernel. */
    return 0;
}

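/**
 * Calculates the number of SG segments required to represent the sk_buff,
 * including the extra padding segment used for runt frames from the host.
 *
 * @returns Segment count.
 * @param   pThis       The instance.
 * @param   pBuf        The sk_buff.
 */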
static unsigned vboxNetFltLinuxSGSegments(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
{
#ifdef VBOXNETFLT_SG_SUPPORT
    unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
#else
    unsigned cSegs = 1;
#endif
#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     */
    if (pBuf->len < 60)
        cSegs++;
#endif
    return cSegs;
}

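/**
 * Unmaps any fragment pages mapped by vboxNetFltLinuxSkBufToSG (when
 * VBOXNETFLT_SG_SUPPORT is defined) and frees the sk_buff.
 *
 * @param   pBuf        The sk_buff.
 * @param   pSG         The SG list created from pBuf.
 */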
/* WARNING! This function should only be called after vboxNetFltLinuxSkBufToSG()! */
static void vboxNetFltLinuxFreeSkBuff(struct sk_buff *pBuf, PINTNETSG pSG)
{
#ifdef VBOXNETFLT_SG_SUPPORT
    int i;

    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
        kunmap(pSG->aSegs[i+1].pv);
    }
#endif

    dev_kfree_skb(pBuf);
}

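/**
 * Converts a single sk_buff to a SG list and forwards it to the internal
 * network.
 *
 * @returns VBox status code.
 * @param   pThis       The instance.
 * @param   pBuf        The sk_buff to forward.
 * @param   fSrc        The source of the frame (INTNETTRUNKDIR_HOST or INTNETTRUNKDIR_WIRE).
 */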
static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
{
    unsigned cSegs = vboxNetFltLinuxSGSegments(pThis, pBuf);
    if (cSegs < MAX_SKB_FRAGS)
    {
        uint8_t *pTmp;
        PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
        if (!pSG)
        {
            Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
            return VERR_NO_MEMORY;
        }
        vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc);

        pTmp = pSG->aSegs[0].pv;
        Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
             " <-- (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes)\n",
             pTmp[0], pTmp[1], pTmp[2], pTmp[3], pTmp[4], pTmp[5],
             (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire",
             pTmp[6], pTmp[7], pTmp[8], pTmp[9], pTmp[10], pTmp[11],
             pSG->cbTotal));
        pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
        Log2(("VBoxNetFlt: Dropping the sk_buff.\n"));
        vboxNetFltLinuxFreeSkBuff(pBuf, pSG);
    }

    return VINF_SUCCESS;
}

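/**
 * Forwards a packet coming from the host or the wire to the internal network,
 * splitting GSO packets into individual segments first.
 *
 * @param   pThis       The instance.
 * @param   pBuf        The sk_buff to forward.
 */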
static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
{
    struct sk_buff *pNext, *pSegment = NULL;
    uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;

#ifndef VBOXNETFLT_SG_SUPPORT
    /*
     * Get rid of fragmented packets, they cause too much trouble.
     */
    struct sk_buff *pCopy = skb_copy(pBuf, GFP_KERNEL);
    kfree_skb(pBuf);
    if (!pCopy)
    {
        LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
        return;
    }
    pBuf = pCopy;
#endif

    Log2(("vboxNetFltLinuxForwardToIntNet: cb=%u gso_size=%u gso_segs=%u gso_type=%u\n",
          pBuf->len, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type));

    if (skb_is_gso(pBuf))
    {
        /* Need to segment the packet */
        struct sk_buff *pSegments = skb_gso_segment(pBuf, 0); /* No features, very dumb device */
        pBuf->next = pSegments;
    }
    /*
     * Create a (scatter/)gather list for the sk_buff and feed it to the internal network.
     */
    for (pSegment = pBuf; pSegment; pSegment = pNext)
    {
        pNext = pSegment->next;
        pSegment->next = 0;
        vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
    }
}

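/**
 * Work queue handler (bottom half): drains the transmit queue filled by
 * vboxNetFltLinuxPacketHandler and forwards each sk_buff to the internal
 * network while the instance is active.
 *
 * @param   pWork       The work structure embedded in the instance.
 */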
static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
{
    struct sk_buff *pBuf;
    bool fActive;
    PVBOXNETFLTINS pThis;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Log2(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
    pThis = VBOX_FLT_XT_TO_INST(pWork);
    /*
     * Active? Retain the instance and increment the busy counter.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    fActive = ASMAtomicUoReadBool(&pThis->fActive);
    if (fActive)
        vboxNetFltRetain(pThis, true /* fBusy */);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (!fActive)
        return;

    while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != 0)
        vboxNetFltLinuxForwardToIntNet(pThis, pBuf);

    vboxNetFltRelease(pThis, true /* fBusy */);
}

/**
 * Internal worker for vboxNetFltOsInitInstance and vboxNetFltOsMaybeRediscovered.
 *
 * @returns VBox status code.
 * @param   pThis       The instance.
 * @param   pDev        The net_device to attach to, NULL if not found.
 */
static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    struct packet_type *pt;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));

    if (!pDev)
    {
        Log(("VBoxNetFlt: failed to find device '%s'\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    dev_hold(pDev);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, pDev);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
    /*
     * Get the mac address while we still have a valid net_device reference.
     */
    memcpy(&pThis->u.s.Mac, pDev->dev_addr, sizeof(pThis->u.s.Mac));

    pt = &pThis->u.s.PacketType;
    pt->type = __constant_htons(ETH_P_ALL);
    pt->dev = pDev;
    pt->func = vboxNetFltLinuxPacketHandler;
    dev_add_pack(pt);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pDev)
    {
        ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
        ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
        pDev = NULL; /* don't dereference it */
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));

    /* Release the interface on failure. */
    if (pDev)
    {
        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
        ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
        dev_put(pDev);
        Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }

    LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
    return VINF_SUCCESS;
}

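/**
 * Handles NETDEV_UNREGISTER: marks the instance as disconnected, removes the
 * packet handler, purges the transmit queue and drops the device reference.
 *
 * @returns NOTIFY_OK.
 * @param   pThis       The instance.
 * @param   pDev        The device being unregistered.
 */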
static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Assert(!pThis->fDisconnectedFromHost);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
    ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    dev_remove_pack(&pThis->u.s.PacketType);
    skb_queue_purge(&pThis->u.s.XmitQueue);
    Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
    Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    dev_put(pDev);

    return NOTIFY_OK;
}

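/**
 * Handles NETDEV_UP: puts the device into promiscuous mode if the instance
 * is active and has not done so already.
 *
 * @returns NOTIFY_OK.
 * @param   pThis       The instance.
 * @param   pDev        The device that came up.
 */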
static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    /* Check if we are not suspended and promiscuous mode has not been set. */
    if (ASMAtomicUoReadBool(&pThis->fActive) && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
    {
        /* Note that there is no need for locking as the kernel got hold of the lock already. */
        dev_set_promiscuity(pDev, 1);
        ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
        Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    }
    else
        Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    return NOTIFY_OK;
}

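/**
 * Handles NETDEV_GOING_DOWN: undoes the promiscuous mode setting made in
 * vboxNetFltLinuxDeviceIsUp / vboxNetFltPortOsSetActive.
 *
 * @returns NOTIFY_OK.
 * @param   pThis       The instance.
 * @param   pDev        The device going down.
 */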
static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    /* Undo promiscuous mode if we have set it. */
    if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
    {
        /* Note that there is no need for locking as the kernel got hold of the lock already. */
        dev_set_promiscuity(pDev, -1);
        ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
        Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    }
    else
        Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    return NOTIFY_OK;
}

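/**
 * Netdevice event notifier callback: attaches to the interface named in the
 * instance when it is registered and dispatches unregister/up/going-down
 * events for the device we are attached to.
 *
 * @returns NOTIFY_OK.
 * @param   self            The notifier block; used to locate the instance.
 * @param   ulEventType     The NETDEV_* event type.
 * @param   ptr             The net_device affected by the event.
 */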
static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
{
    int rc = NOTIFY_OK;
#ifdef DEBUG
    char *pszEvent = "<unknown>";
#endif
    struct net_device *pDev = (struct net_device *)ptr;
    PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);

#ifdef DEBUG
    switch (ulEventType)
    {
        case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
        case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
        case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
        case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
        case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
        case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
        case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
        case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
        case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
        case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
    }
    Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
         pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
#endif
    if (ulEventType == NETDEV_REGISTER && !strcmp(pDev->name, pThis->szName))
    {
        vboxNetFltLinuxAttachToInterface(pThis, pDev);
    }
    else
    {
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev != ptr)
            return NOTIFY_OK;
        switch (ulEventType)
        {
            case NETDEV_UNREGISTER:
                rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
                break;
            case NETDEV_UP:
                rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
                break;
            case NETDEV_GOING_DOWN:
                rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
                break;
            case NETDEV_CHANGENAME:
                break;
        }
    }

    return rc;
}

bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
{
    return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
}


int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, PINTNETSG pSG, uint32_t fDst)
{
    uint8_t *pTmp;
    struct net_device * pDev;
    int err;
    int rc = VINF_SUCCESS;

    LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));

    pTmp = pSG->aSegs[0].pv;

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * Create a sk_buff for the gather list and push it onto the wire.
         */
        if (fDst & INTNETTRUNKDIR_WIRE)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
            if (pBuf)
            {
                Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
                     " --> (wire)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes)\n",
                     pTmp[6], pTmp[7], pTmp[8], pTmp[9], pTmp[10], pTmp[11],
                     pTmp[0], pTmp[1], pTmp[2], pTmp[3], pTmp[4], pTmp[5],
                     pSG->cbTotal));
                err = dev_queue_xmit(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        /*
         * Create a sk_buff for the gather list and push it onto the host stack.
         */
        if (fDst & INTNETTRUNKDIR_HOST)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
            if (pBuf)
            {
                Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
                     " --> (host)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes)\n",
                     pTmp[6], pTmp[7], pTmp[8], pTmp[9], pTmp[10], pTmp[11],
                     pTmp[0], pTmp[1], pTmp[2], pTmp[3], pTmp[4], pTmp[5],
                     pSG->cbTotal));
                err = netif_rx_ni(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }

    return rc;
}


bool vboxNetFltPortOsIsPromiscuous(PVBOXNETFLTINS pThis)
{
    bool fRc = false;
    struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
    return fRc;
}


void vboxNetFltPortOsGetMacAddress(PVBOXNETFLTINS pThis, PRTMAC pMac)
{
    *pMac = pThis->u.s.Mac;
}


bool vboxNetFltPortOsIsHostMac(PVBOXNETFLTINS pThis, PCRTMAC pMac)
{
    /* ASSUMES that the MAC address never changes. */
    return pThis->u.s.Mac.au16[0] == pMac->au16[0]
        && pThis->u.s.Mac.au16[1] == pMac->au16[1]
        && pThis->u.s.Mac.au16[2] == pMac->au16[2];
}

void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
{
    struct net_device * pDev;

    LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s\n",
             pThis, pThis->szName, fActive?"true":"false"));

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * This api is a bit weird, the best reference is the code.
         *
         * Also, we have a bit of a race condition wrt the maintenance of
         * the host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
         */
        u_int16_t fIf;
        unsigned const cPromiscBefore = VBOX_GET_PCOUNT(pDev);
        if (fActive)
        {
            int err = 0;
            Assert(!pThis->u.s.fPromiscuousSet);

#if 0
            /*
             * Try bring the interface up and running if it's down.
             */
            fIf = dev_get_flags(pDev);
            if ((fIf & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING))
            {
                rtnl_lock();
                err = dev_change_flags(pDev, fIf | IFF_UP);
                rtnl_unlock();
                fIf = dev_get_flags(pDev);
            }

            /*
             * Is it already up? If it isn't, leave it to the link event or
             * we'll upset if_pcount (as stated above, ifnet_set_promiscuous is weird).
             */
            if ((fIf & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING)
                && !ASMAtomicReadBool(&pThis->u.s.fPromiscuousSet))
            {
#endif
                rtnl_lock();
                dev_set_promiscuity(pDev, 1);
                rtnl_unlock();
                pThis->u.s.fPromiscuousSet = true;
                Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
#if 0
                /* check if it actually worked, this stuff is not always behaving well. */
                if (!(dev_get_flags(pDev) & IFF_PROMISC))
                {
                    err = dev_change_flags(pDev, fIf | IFF_PROMISC);
                    if (!err)
                        Log(("vboxNetFlt: fixed IFF_PROMISC on %s (%d->%d)\n", pThis->szName, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
                    else
                        Log(("VBoxNetFlt: failed to fix IFF_PROMISC on %s, err=%d (%d->%d)\n",
                             pThis->szName, err, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
                }
#endif
#if 0
            }
            else if (!err)
                Log(("VBoxNetFlt: Waiting for the link to come up... (%d->%d)\n", cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
            if (err)
                LogRel(("VBoxNetFlt: Failed to put '%s' into promiscuous mode, err=%d (%d->%d)\n", pThis->szName, err, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
#endif
        }
        else
        {
            if (pThis->u.s.fPromiscuousSet)
            {
                rtnl_lock();
                dev_set_promiscuity(pDev, -1);
                rtnl_unlock();
                Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
            }
            pThis->u.s.fPromiscuousSet = false;

            fIf = dev_get_flags(pDev);
            Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
}

int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}


int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}

void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
{
    struct net_device *pDev;
    bool fRegistered;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (fRegistered)
    {
        dev_remove_pack(&pThis->u.s.PacketType);
        skb_queue_purge(&pThis->u.s.XmitQueue);
        Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
        Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        dev_put(pDev);
    }
    Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
    unregister_netdevice_notifier(&pThis->u.s.Notifier);
}

int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis)
{
    int err;
    pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
    err = register_netdevice_notifier(&pThis->u.s.Notifier);
    if (err)
        return VERR_INTNET_FLT_IF_FAILED;
    if (!pThis->u.s.fRegistered)
    {
        unregister_netdevice_notifier(&pThis->u.s.Notifier);
        LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }
    Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
    return pThis->fDisconnectedFromHost ? VERR_INTNET_FLT_IF_FAILED : VINF_SUCCESS;
}

int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
{
    /*
     * Init the linux specific members.
     */
    pThis->u.s.pDev = NULL;
    pThis->u.s.fRegistered = false;
    pThis->u.s.fPromiscuousSet = false;
    memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
    skb_queue_head_init(&pThis->u.s.XmitQueue);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
#else
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, NULL);
#endif

    return VINF_SUCCESS;
}
