VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@ 15840

Last change on this file since 15840 was 15840, checked in by vboxsync, 16 years ago

vboxnetflt: compile fix for older Linux kernels

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.7 KB
Line 
1/* $Id: VBoxNetFlt-linux.c 15840 2009-01-07 16:37:24Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#include "the-linux-kernel.h"
26#include "version-generated.h"
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/rtnetlink.h>
30
31#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <iprt/alloca.h>
35#include <iprt/assert.h>
36#include <iprt/spinlock.h>
37#include <iprt/semaphore.h>
38#include <iprt/initterm.h>
39#include <iprt/process.h>
40#include <iprt/mem.h>
41#include <iprt/log.h>
42#include <iprt/mp.h>
43#include <iprt/mem.h>
44#include <iprt/time.h>
45
46#define VBOXNETFLT_OS_SPECFIC 1
47#include "../VBoxNetFltInternal.h"
48
49#define VBOX_FLT_NB_TO_INST(pNB) ((PVBOXNETFLTINS)((uint8_t *)pNB - \
50 RT_OFFSETOF(VBOXNETFLTINS, u.s.Notifier)))
51#define VBOX_FLT_PT_TO_INST(pPT) ((PVBOXNETFLTINS)((uint8_t *)pPT - \
52 RT_OFFSETOF(VBOXNETFLTINS, u.s.PacketType)))
53#define VBOX_FLT_XT_TO_INST(pXT) ((PVBOXNETFLTINS)((uint8_t *)pXT - \
54 RT_OFFSETOF(VBOXNETFLTINS, u.s.XmitTask)))
55
56#define VBOX_GET_PCOUNT(pDev) (pDev->promiscuity)
57
58#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
59# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
60# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
61#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
62# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
63# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
64#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
65
66#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
67# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
68#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
69# define CHECKSUM_PARTIAL CHECKSUM_HW
70# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
71# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
72# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
73# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
74# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
75#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
76
77#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
78# define VBOX_SKB_IS_GSO(skb) skb_is_gso(skb)
79 /* No features, very dumb device */
80# define VBOX_SKB_GSO_SEGMENT(skb) skb_gso_segment(skb, 0)
81#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
82# define VBOX_SKB_IS_GSO(skb) false
83# define VBOX_SKB_GSO_SEGMENT(skb) NULL
84#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
85
86#ifndef NET_IP_ALIGN
87# define NET_IP_ALIGN 2
88#endif
89
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12)
/**
 * Backport of dev_get_flags() for kernels that predate it (< 2.6.12).
 *
 * Returns the interface flags as user space sees them: the volatile bits
 * (IFF_PROMISC, IFF_ALLMULTI) are taken from gflags and IFF_RUNNING is
 * synthesized from the actual running/carrier state.
 *
 * @returns The interface flags.
 * @param   dev     The network device.
 */
unsigned dev_get_flags(const struct net_device *dev)
{
    unsigned flags;

    /* Merge the real flags with the user-controlled gflags bits. */
    flags = (dev->flags & ~(IFF_PROMISC |
                            IFF_ALLMULTI |
                            IFF_RUNNING)) |
            (dev->gflags & (IFF_PROMISC |
                            IFF_ALLMULTI));

    /* Report IFF_RUNNING only when the device is up and has carrier. */
    if (netif_running(dev) && netif_carrier_ok(dev))
        flags |= IFF_RUNNING;

    return flags;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
107
108/*******************************************************************************
109* Internal Functions *
110*******************************************************************************/
111static int VBoxNetFltLinuxInit(void);
112static void VBoxNetFltLinuxUnload(void);
113
114
115/*******************************************************************************
116* Global Variables *
117*******************************************************************************/
118/**
119 * The (common) global data.
120 */
121static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
122
123module_init(VBoxNetFltLinuxInit);
124module_exit(VBoxNetFltLinuxUnload);
125
126MODULE_AUTHOR("Sun Microsystems, Inc.");
127MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
128MODULE_LICENSE("GPL");
129#ifdef MODULE_VERSION
130# define xstr(s) str(s)
131# define str(s) #s
132MODULE_VERSION(VBOX_VERSION_STRING " (" xstr(INTNETTRUNKIFPORT_VERSION) ")");
133#endif
134
/* Note: a duplicate tentative definition of g_VBoxNetFltGlobals used to live
 * here; the (common) global data is defined once above, before module_init. */
139
140
141/**
142 * Initialize module.
143 *
144 * @returns appropriate status code.
145 */
146static int __init VBoxNetFltLinuxInit(void)
147{
148 int rc;
149 Log(("VBoxNetFltLinuxInit\n"));
150
151 /*
152 * Initialize IPRT.
153 */
154 rc = RTR0Init(0);
155 if (RT_SUCCESS(rc))
156 {
157 /*
158 * Initialize the globals and connect to the support driver.
159 *
160 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
161 * for establishing the connect to the support driver.
162 */
163 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
164 rc = vboxNetFltInitGlobals(&g_VBoxNetFltGlobals);
165 if (RT_SUCCESS(rc))
166 {
167 LogRel(("VBoxNetFlt: Successfully started.\n"));
168 return 0;
169 }
170
171 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
172 RTR0Term();
173 }
174 else
175 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
176
177 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
178 return -RTErrConvertToErrno(rc);
179}
180
181
/**
 * Unload the module.
 *
 * Tears down the globals first, then IPRT — the strict reverse of the
 * initialization order in VBoxNetFltLinuxInit.
 *
 * @todo We have to prevent this if we're busy!
 */
static void __exit VBoxNetFltLinuxUnload(void)
{
    int rc;
    Log(("VBoxNetFltLinuxUnload\n"));
    Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));

    /*
     * Undo the work done during start (in reverse order).
     */
    rc = vboxNetFltTryDeleteGlobals(&g_VBoxNetFltGlobals);
    AssertRC(rc); NOREF(rc);

    RTR0Term();

    /* Scrub the global state for good measure. */
    memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));

    Log(("VBoxNetFltLinuxUnload - done\n"));
}
205
206
/**
 * Reads and retains the host interface handle.
 *
 * @returns The handle, NULL if detached.
 * @param   pThis   The instance.
 *
 * @note    The careful spinlock + dev_hold() variant is currently disabled
 *          (#if 0); the active code just reads the pointer without taking a
 *          device reference, matching the empty vboxNetFltLinuxReleaseNetDev.
 */
DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
{
#if 0
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    struct net_device *pDev = NULL;

    Log(("vboxNetFltLinuxRetainNetDev\n"));
    /*
     * Be careful here to avoid problems racing the detached callback.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
    {
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev)
        {
            dev_hold(pDev);
            Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        }
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxRetainNetDev - done\n"));
    return pDev;
#else
    return (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
#endif
}
241
242
/**
 * Release the host interface handle previously retained
 * by vboxNetFltLinuxRetainNetDev.
 *
 * @param   pThis   The instance.
 * @param   pDev    The vboxNetFltLinuxRetainNetDev
 *                  return value, NULL is fine.
 *
 * @note    Currently a no-op: the dev_put() variant is disabled (#if 0) to
 *          mirror the disabled dev_hold() in vboxNetFltLinuxRetainNetDev.
 */
DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
#if 0
    Log(("vboxNetFltLinuxReleaseNetDev\n"));
    NOREF(pThis);
    if (pDev)
    {
        dev_put(pDev);
        Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }
    Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
#endif
}
264
265#define VBOXNETFLT_CB_TAG 0xA1C9D7C3
266#define VBOXNETFLT_SKB_CB(skb) (*(uint32_t*)&((skb)->cb[0]))
267
268/**
269 * Checks whether this is an mbuf created by vboxNetFltLinuxMBufFromSG,
270 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
271 *
272 * @returns true / false accordingly.
273 * @param pBuf The sk_buff.
274 */
275DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
276{
277 return VBOXNETFLT_SKB_CB(pBuf) == VBOXNETFLT_CB_TAG ;
278}
279
280
/**
 * Internal worker that creates a linux sk_buff for a
 * (scatter/)gather list.
 *
 * @returns Pointer to the sk_buff, NULL on failure (multi-segment SG,
 *          empty SG, or allocation failure).
 * @param   pThis       The instance.
 * @param   pSG         The (scatter/)gather list.
 * @param   fDstWire    Set when the buffer is destined for the wire; keeps
 *                      the ethernet header in place after eth_type_trans()
 *                      pulled it.
 */
static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
{
    struct sk_buff *pPkt;
    struct net_device *pDev;
    /*
     * Because we're lazy, we will ASSUME that all SGs coming from INTNET
     * will only contain one single segment.
     */
    if (pSG->cSegsUsed != 1 || pSG->cbTotal != pSG->aSegs[0].cb)
    {
        LogRel(("VBoxNetFlt: Dropped multi-segment(%d) packet coming from internal network.\n", pSG->cSegsUsed));
        return NULL;
    }
    if (pSG->cbTotal == 0)
    {
        LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
        return NULL;
    }

    /*
     * Allocate a packet and copy over the data.
     *
     */
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
    if (pPkt)
    {
        pPkt->dev = pDev;
        /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
        skb_reserve(pPkt, NET_IP_ALIGN);
        skb_put(pPkt, pSG->cbTotal);
        memcpy(pPkt->data, pSG->aSegs[0].pv, pSG->cbTotal);
        /* Set protocol and packet_type fields. */
        pPkt->protocol = eth_type_trans(pPkt, pDev);
        pPkt->ip_summed = CHECKSUM_NONE;
        if (fDstWire)
        {
            VBOX_SKB_RESET_NETWORK_HDR(pPkt);
            /* Restore ethernet header back. */
            skb_push(pPkt, ETH_HLEN);
            VBOX_SKB_RESET_MAC_HDR(pPkt);
        }
        /* Tag the buffer so vboxNetFltLinuxSkBufIsOur() recognizes and drops it. */
        VBOXNETFLT_SKB_CB(pPkt) = VBOXNETFLT_CB_TAG;

        return pPkt;
    }
    else
        Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
    pSG->pvUserData = NULL;

    return NULL;
}
341
342
343/**
344 * Initializes a SG list from an sk_buff.
345 *
346 * @returns Number of segments.
347 * @param pThis The instance.
348 * @param pBuf The sk_buff.
349 * @param pSG The SG.
350 * @param pvFrame The frame pointer, optional.
351 * @param cSegs The number of segments allocated for the SG.
352 * This should match the number in the mbuf exactly!
353 * @param fSrc The source of the frame.
354 */
355DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
356{
357 int i;
358 NOREF(pThis);
359
360 Assert(!skb_shinfo(pBuf)->frag_list);
361 pSG->pvOwnerData = NULL;
362 pSG->pvUserData = NULL;
363 pSG->pvUserData2 = NULL;
364 pSG->cUsers = 1;
365 pSG->fFlags = INTNETSG_FLAGS_TEMP;
366 pSG->cSegsAlloc = cSegs;
367
368 if (fSrc & INTNETTRUNKDIR_WIRE)
369 {
370 /*
371 * The packet came from wire, ethernet header was removed by device driver.
372 * Restore it.
373 */
374 skb_push(pBuf, ETH_HLEN);
375 }
376 pSG->cbTotal = pBuf->len;
377#ifdef VBOXNETFLT_SG_SUPPORT
378 pSG->aSegs[0].cb = skb_headlen(pBuf);
379 pSG->aSegs[0].pv = pBuf->data;
380 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
381
382 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
383 {
384 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
385 pSG->aSegs[i+1].cb = pFrag->size;
386 pSG->aSegs[i+1].pv = kmap(pFrag->page);
387 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
388 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
389 }
390 pSG->cSegsUsed = ++i;
391#else
392 pSG->aSegs[0].cb = pBuf->len;
393 pSG->aSegs[0].pv = pBuf->data;
394 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
395 pSG->cSegsUsed = i = 1;
396#endif
397
398
399#ifdef PADD_RUNT_FRAMES_FROM_HOST
400 /*
401 * Add a trailer if the frame is too small.
402 *
403 * Since we're getting to the packet before it is framed, it has not
404 * yet been padded. The current solution is to add a segment pointing
405 * to a buffer containing all zeros and pray that works for all frames...
406 */
407 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
408 {
409 static uint8_t const s_abZero[128] = {0};
410
411 AssertReturnVoid(i < cSegs);
412
413 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
414 pSG->aSegs[i].pv = (void *)&s_abZero[0];
415 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
416 pSG->cbTotal = 60;
417 pSG->cSegsUsed++;
418 }
419#endif
420 Log2(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
421 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
422 for (i = 0; i < pSG->cSegsUsed; i++)
423 Log2(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
424 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
425}
426
/**
 * Packet handler; called by the kernel for every frame seen on the device
 * our packet_type is bound to (installed by vboxNetFltLinuxAttachToInterface).
 *
 * Frames we injected ourselves (tagged with VBOXNETFLT_CB_TAG) are dropped;
 * everything else is queued and the bottom half (vboxNetFltLinuxXmitTask)
 * is scheduled to forward it to the internal network.
 *
 * @returns 0 (the value is ignored by the kernel).
 * @param   pBuf        The sk_buff; ownership is taken over or it is freed.
 * @param   pSkbDev     The device the packet was seen on.
 * @param   pPacketType Our packet_type; used to recover the instance pointer.
 * @param   pOrigDev    The original device (unused).
 */
static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
                                        struct net_device *pSkbDev,
                                        struct packet_type *pPacketType,
                                        struct net_device *pOrigDev)
{
    PVBOXNETFLTINS pThis;
    struct net_device *pDev;
    /*
     * Drop it immediately?
     */
    Log2(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p pOrigDev=%p\n",
          pBuf, pSkbDev, pPacketType, pOrigDev));
    if (!pBuf)
        return 0;
    pThis = VBOX_FLT_PT_TO_INST(pPacketType);
    /* NOTE(review): pDev is fetched atomically but the check below compares the
       raw pThis->u.s.pDev instead — confirm which read was intended. */
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pThis->u.s.pDev != pSkbDev)
    {
        Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
        return 0;
    }

    /* One of our own injected frames coming back around: swallow it. */
    if (vboxNetFltLinuxSkBufIsOur(pBuf))
    {
        dev_kfree_skb(pBuf);
        return 0;
    }

    /* Add the packet to transmit queue and schedule the bottom half. */
    skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
    schedule_work(&pThis->u.s.XmitTask);
    Log2(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
          &pThis->u.s.XmitTask, pBuf));
    /* It does not really matter what we return, it is ignored by the kernel. */
    return 0;
}
473
474static unsigned vboxNetFltLinuxSGSegments(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
475{
476#ifdef VBOXNETFLT_SG_SUPPORT
477 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
478#else
479 unsigned cSegs = 1;
480#endif
481#ifdef PADD_RUNT_FRAMES_FROM_HOST
482 /*
483 * Add a trailer if the frame is too small.
484 */
485 if (pBuf->len < 60)
486 cSegs++;
487#endif
488 return cSegs;
489}
490
491/* WARNING! This function should only be called after vboxNetFltLinuxSkBufToSG()! */
492static void vboxNetFltLinuxFreeSkBuff(struct sk_buff *pBuf, PINTNETSG pSG)
493{
494#ifdef VBOXNETFLT_SG_SUPPORT
495 int i;
496
497 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
498 {
499 printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
500 kunmap(pSG->aSegs[i+1].pv);
501 }
502#endif
503
504 dev_kfree_skb(pBuf);
505}
506
507static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
508{
509 unsigned cSegs = vboxNetFltLinuxSGSegments(pThis, pBuf);
510 if (cSegs < MAX_SKB_FRAGS)
511 {
512 uint8_t *pTmp;
513 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
514 if (!pSG)
515 {
516 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
517 return VERR_NO_MEMORY;
518 }
519 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc);
520
521 pTmp = pSG->aSegs[0].pv;
522 Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
523 " <-- (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes)\n",
524 pTmp[0], pTmp[1], pTmp[2], pTmp[3], pTmp[4], pTmp[5],
525 (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire",
526 pTmp[6], pTmp[7], pTmp[8], pTmp[9], pTmp[10], pTmp[11],
527 pSG->cbTotal));
528 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
529 Log2(("VBoxNetFlt: Dropping the sk_buff.\n"));
530 vboxNetFltLinuxFreeSkBuff(pBuf, pSG);
531 }
532
533 return VINF_SUCCESS;
534}
535
/**
 * Creates a (scatter/)gather list for the sk_buff and feeds it to the
 * internal network, segmenting GSO packets and finalizing partial checksums
 * first. Consumes pBuf (directly or via the forward path).
 *
 * @param   pThis   The instance.
 * @param   pBuf    The sk_buff to forward.
 */
static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
{
    /* Outgoing packets come from the host, everything else from the wire. */
    uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;

#ifndef VBOXNETFLT_SG_SUPPORT
    /*
     * Get rid of fragmented packets, they cause too much trouble.
     */
    struct sk_buff *pCopy = skb_copy(pBuf, GFP_KERNEL);
    kfree_skb(pBuf);
    if (!pCopy)
    {
        LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
        return;
    }
    pBuf = pCopy;
#endif

    if (VBOX_SKB_IS_GSO(pBuf))
    {
        /* Need to segment the packet */
        struct sk_buff *pNext, *pSegment;
        //Log2(("vboxNetFltLinuxForwardToIntNet: cb=%u gso_size=%u gso_segs=%u gso_type=%u\n",
        //    pBuf->len, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type));

        for (pSegment = VBOX_SKB_GSO_SEGMENT(pBuf); pSegment; pSegment = pNext)
        {
            /* Unlink each segment before forwarding; each is consumed individually. */
            pNext = pSegment->next;
            pSegment->next = 0;
            vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
        }
        /* The original GSO sk_buff is no longer needed once segmented. */
        dev_kfree_skb(pBuf);
    }
    else
    {
        /* Finalize any pending hardware checksum before handing the frame over. */
        if (pBuf->ip_summed == CHECKSUM_PARTIAL)
            if (VBOX_SKB_CHECKSUM_HELP(pBuf))
            {
                LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
                dev_kfree_skb(pBuf);
                return;
            }
        vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
    }
}
584
/**
 * Work-queue bottom half: drains the xmit queue filled by
 * vboxNetFltLinuxPacketHandler and forwards every sk_buff to the internal
 * network. Runs in process context, so the forwarding code may use
 * GFP_KERNEL allocations.
 *
 * @param   pWork   Pointer to the XmitTask member of the instance.
 */
static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
{
    struct sk_buff *pBuf;
    bool fActive;
    PVBOXNETFLTINS pThis;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Log2(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
    pThis = VBOX_FLT_XT_TO_INST(pWork);
    /*
     * Active? Retain the instance and increment the busy counter.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    fActive = ASMAtomicUoReadBool(&pThis->fActive);
    if (fActive)
        vboxNetFltRetain(pThis, true /* fBusy */);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (!fActive)
        return;

    /* Forward everything that was queued while we were getting scheduled. */
    while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != 0)
        vboxNetFltLinuxForwardToIntNet(pThis, pBuf);

    vboxNetFltRelease(pThis, true /* fBusy */);
}
610
/**
 * Internal worker that attaches the filter to a host interface.
 *
 * Retains the device, caches its MAC address and installs the ETH_P_ALL
 * packet handler. Called from the netdevice notifier on NETDEV_REGISTER
 * (see vboxNetFltLinuxNotifierCallback).
 *
 * @returns VBox status code.
 * @param   pThis   The instance.
 * @param   pDev    The net_device to attach to; NULL fails the attach.
 */
static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    struct packet_type *pt;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));

    if (!pDev)
    {
        Log(("VBoxNetFlt: failed to find device '%s'\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    /* Take a device reference and publish the pointer under the spinlock. */
    dev_hold(pDev);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, pDev);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
    /*
     * Get the mac address while we still have a valid ifnet reference.
     */
    memcpy(&pThis->u.s.Mac, pDev->dev_addr, sizeof(pThis->u.s.Mac));

    /* Install the ETH_P_ALL handler so we see every frame on the device. */
    pt = &pThis->u.s.PacketType;
    pt->type = __constant_htons(ETH_P_ALL);
    pt->dev = pDev;
    pt->func = vboxNetFltLinuxPacketHandler;
    dev_add_pack(pt);
    /* Re-check under the lock that nothing detached us in the meantime. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pDev)
    {
        ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
        ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
        pDev = NULL; /* don't dereference it */
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));

    /* Release the interface on failure. */
    /* NOTE(review): pDev is NULL on both paths above, so this cleanup branch
       looks unreachable — verify the intended handling of a racing detach. */
    if (pDev)
    {
        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
        ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
        dev_put(pDev);
        Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }

    LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
    return VINF_SUCCESS;
}
673
674
/**
 * Handles NETDEV_UNREGISTER for the attached device: marks the instance
 * detached, removes the packet handler, purges the xmit queue and drops the
 * device reference taken in vboxNetFltLinuxAttachToInterface.
 *
 * @returns NOTIFY_OK.
 * @param   pThis   The instance.
 * @param   pDev    The device being unregistered.
 */
static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Assert(!pThis->fDisconnectedFromHost);
    /* Flip the detached state under the spinlock before tearing anything down. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
    ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    dev_remove_pack(&pThis->u.s.PacketType);
    skb_queue_purge(&pThis->u.s.XmitQueue);
    Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
    Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    dev_put(pDev);

    return NOTIFY_OK;
}
694
695static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
696{
697 /* Check if we are not suspended and promiscuous mode has not been set. */
698 if (ASMAtomicUoReadBool(&pThis->fActive) && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
699 {
700 /* Note that there is no need for locking as the kernel got hold of the lock already. */
701 dev_set_promiscuity(pDev, 1);
702 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
703 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
704 }
705 else
706 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
707 return NOTIFY_OK;
708}
709
/**
 * Handles NETDEV_GOING_DOWN: undoes our promiscuous-mode reference before
 * the device goes away.
 *
 * @returns NOTIFY_OK.
 * @param   pThis   The instance.
 * @param   pDev    The device going down.
 */
static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    /* Undo promiscuous mode if we have set it. */
    if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
    {
        /* Note that there is no need for locking as the kernel got hold of the lock already. */
        dev_set_promiscuity(pDev, -1);
        ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
        Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    }
    else
        Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    return NOTIFY_OK;
}
724
/**
 * Netdevice event notifier callback.
 *
 * Attaches to the interface named pThis->szName on NETDEV_REGISTER and
 * dispatches UNREGISTER / UP / GOING_DOWN events for the attached device;
 * events for any other device are ignored.
 *
 * @returns NOTIFY_OK (or the handler's NOTIFY_* result).
 * @param   self        Pointer to our notifier_block member.
 * @param   ulEventType The NETDEV_* event.
 * @param   ptr         The struct net_device the event concerns.
 */
static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)

{
    int rc = NOTIFY_OK;
#ifdef DEBUG
    char *pszEvent = "<unknown>";
#endif
    struct net_device *pDev = (struct net_device *)ptr;
    PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);

#ifdef DEBUG
    /* Pretty-print the event for the debug log. */
    switch (ulEventType)
    {
        case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
        case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
        case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
        case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
        case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
        case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
        case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
        case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
        case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
        case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
    }
    Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
         pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
#endif
    if (ulEventType == NETDEV_REGISTER && !strcmp(pDev->name, pThis->szName))
    {
        vboxNetFltLinuxAttachToInterface(pThis, pDev);
    }
    else
    {
        /* Only events for the device we're attached to are interesting. */
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev != ptr)
            return NOTIFY_OK;
        rc = NOTIFY_OK;
        switch (ulEventType)
        {
            case NETDEV_UNREGISTER:
                rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
                break;
            case NETDEV_UP:
                rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
                break;
            case NETDEV_GOING_DOWN:
                rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
                break;
            case NETDEV_CHANGENAME:
                break;
        }
    }

    return rc;
}
780
781bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
782{
783 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
784}
785
786
/**
 * Transmits a frame from the internal network to the wire and/or the host
 * network stack.
 *
 * @returns VBox status code; when both directions are requested, the last
 *          failure wins.
 * @param   pThis   The instance.
 * @param   pSG     The frame as a gather list; assumed to be a single
 *                  segment (see vboxNetFltLinuxSkBufFromSG).
 * @param   fDst    Destination mask: INTNETTRUNKDIR_WIRE and/or _HOST.
 */
int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, PINTNETSG pSG, uint32_t fDst)
{
    uint8_t *pTmp;
    struct net_device * pDev;
    int err;
    int rc = VINF_SUCCESS;

    LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));

    pTmp = pSG->aSegs[0].pv;

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * Create a sk_buff for the gather list and push it onto the wire.
         */
        if (fDst & INTNETTRUNKDIR_WIRE)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
            if (pBuf)
            {
                Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
                     " --> (wire)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes)\n",
                     pTmp[6], pTmp[7], pTmp[8], pTmp[9], pTmp[10], pTmp[11],
                     pTmp[0], pTmp[1], pTmp[2], pTmp[3], pTmp[4], pTmp[5],
                     pSG->cbTotal));
                err = dev_queue_xmit(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        /*
         * Create a sk_buff for the gather list and push it onto the host stack.
         */
        if (fDst & INTNETTRUNKDIR_HOST)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
            if (pBuf)
            {
                Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
                     " --> (host)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes)\n",
                     pTmp[6], pTmp[7], pTmp[8], pTmp[9], pTmp[10], pTmp[11],
                     pTmp[0], pTmp[1], pTmp[2], pTmp[3], pTmp[4], pTmp[5],
                     pSG->cbTotal));
                /* netif_rx_ni: inject into the stack from process context. */
                err = netif_rx_ni(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }

    return rc;
}
848
849
/**
 * Checks whether the host interface is in promiscuous mode for reasons
 * other than our own doing.
 *
 * @returns true when the promiscuity count exceeds our own contribution.
 * @param   pThis   The instance.
 */
bool vboxNetFltPortOsIsPromiscuous(PVBOXNETFLTINS pThis)
{
    bool fRc = false;
    struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /* Subtract the +1 we contributed if we set promiscuity ourselves.
           NOTE(review): assumes ASMAtomicUoReadBool() yields exactly 0/1
           for the '& 1' arithmetic to be meaningful — confirm. */
        fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
        Log(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
             fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
    return fRc;
}
863
864
865void vboxNetFltPortOsGetMacAddress(PVBOXNETFLTINS pThis, PRTMAC pMac)
866{
867 *pMac = pThis->u.s.Mac;
868}
869
870
871bool vboxNetFltPortOsIsHostMac(PVBOXNETFLTINS pThis, PCRTMAC pMac)
872{
873 /* ASSUMES that the MAC address never changes. */
874 return pThis->u.s.Mac.au16[0] == pMac->au16[0]
875 && pThis->u.s.Mac.au16[1] == pMac->au16[1]
876 && pThis->u.s.Mac.au16[2] == pMac->au16[2];
877}
878
879
/**
 * Activates or deactivates the filter on the host interface by toggling
 * promiscuous mode (under the rtnl lock).
 *
 * @param   pThis   The instance.
 * @param   fActive Whether the filter is being activated or deactivated.
 */
void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
{
    struct net_device * pDev;

    LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s\n",
             pThis, pThis->szName, fActive?"true":"false"));

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * This api is a bit weird, the best reference is the code.
         *
         * Also, we have a bit of race conditions wrt the maintenance of the
         * host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
         */
        u_int16_t fIf;
#ifdef LOG_ENABLED
        unsigned const cPromiscBefore = VBOX_GET_PCOUNT(pDev);
#endif
        if (fActive)
        {
            Assert(!pThis->u.s.fPromiscuousSet);

#if 0
            /*
             * Try bring the interface up and running if it's down.
             */
            fIf = dev_get_flags(pDev);
            if ((fIf & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING))
            {
                rtnl_lock();
                int err = dev_change_flags(pDev, fIf | IFF_UP);
                rtnl_unlock();
                fIf = dev_get_flags(pDev);
            }

            /*
             * Is it already up? If it isn't, leave it to the link event or
             * we'll upset if_pcount (as stated above, ifnet_set_promiscuous is weird).
             */
            if ((fIf & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING)
                && !ASMAtomicReadBool(&pThis->u.s.fPromiscuousSet))
            {
#endif
                /* dev_set_promiscuity must be called under the rtnl lock. */
                rtnl_lock();
                dev_set_promiscuity(pDev, 1);
                rtnl_unlock();
                pThis->u.s.fPromiscuousSet = true;
                Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
#if 0
                /* check if it actually worked, this stuff is not always behaving well. */
                if (!(dev_get_flags(pDev) & IFF_PROMISC))
                {
                    err = dev_change_flags(pDev, fIf | IFF_PROMISC);
                    if (!err)
                        Log(("vboxNetFlt: fixed IFF_PROMISC on %s (%d->%d)\n", pThis->szName, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
                    else
                        Log(("VBoxNetFlt: failed to fix IFF_PROMISC on %s, err=%d (%d->%d)\n",
                             pThis->szName, err, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
                }
#endif
#if 0
            }
            else if (!err)
                Log(("VBoxNetFlt: Waiting for the link to come up... (%d->%d)\n", cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
            if (err)
                LogRel(("VBoxNetFlt: Failed to put '%s' into promiscuous mode, err=%d (%d->%d)\n", pThis->szName, err, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
#endif
        }
        else
        {
            /* Deactivating: drop our promiscuity reference if we hold one. */
            if (pThis->u.s.fPromiscuousSet)
            {
                rtnl_lock();
                dev_set_promiscuity(pDev, -1);
                rtnl_unlock();
                Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
            }
            pThis->u.s.fPromiscuousSet = false;

            fIf = dev_get_flags(pDev);
            Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
}
968
969
/**
 * Called when the trunk is disconnected from the internal network; the Linux
 * backend keeps no per-connection state, so this is a no-op.
 *
 * @returns VINF_SUCCESS.
 * @param   pThis   The instance.
 */
int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}
975
976
/**
 * Called when the trunk is connected to the internal network; the Linux
 * backend keeps no per-connection state, so this is a no-op.
 *
 * @returns VINF_SUCCESS.
 * @param   pThis   The instance.
 */
int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}
982
983
/**
 * Deletes the instance: removes the packet handler (if still registered),
 * purges the xmit queue, drops the device reference and finally removes the
 * netdevice notifier.
 *
 * @param   pThis   The instance.
 */
void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
{
    struct net_device *pDev;
    bool fRegistered;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    /* Snapshot the device pointer and registration state under the lock. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (fRegistered)
    {
        dev_remove_pack(&pThis->u.s.PacketType);
        skb_queue_purge(&pThis->u.s.XmitQueue);
        Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
        Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        dev_put(pDev);
    }
    Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
    unregister_netdevice_notifier(&pThis->u.s.Notifier);
}
1005
1006
/**
 * Initializes the instance by registering a netdevice notifier.
 *
 * Presumably relies on the kernel replaying NETDEV_REGISTER for existing
 * devices during register_netdevice_notifier(), which triggers
 * vboxNetFltLinuxAttachToInterface and sets u.s.fRegistered — verify on
 * the targeted kernel versions.
 *
 * @returns VBox status code.
 * @param   pThis   The instance.
 */
int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis)
{
    int err;
    pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
    err = register_netdevice_notifier(&pThis->u.s.Notifier);
    if (err)
        return VERR_INTNET_FLT_IF_FAILED;
    /* If no attach happened during registration, the named interface doesn't exist. */
    if (!pThis->u.s.fRegistered)
    {
        unregister_netdevice_notifier(&pThis->u.s.Notifier);
        LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }
    Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
    return pThis->fDisconnectedFromHost ? VERR_INTNET_FLT_IF_FAILED : VINF_SUCCESS;
}
1023
/**
 * Pre-init: sets up the Linux specific instance members.
 *
 * @returns VINF_SUCCESS.
 * @param   pThis   The instance.
 */
int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
{
    /*
     * Init the linux specific members.
     */
    pThis->u.s.pDev = NULL;
    pThis->u.s.fRegistered = false;
    pThis->u.s.fPromiscuousSet = false;
    memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
    skb_queue_head_init(&pThis->u.s.XmitQueue);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
    /* 2.6.20 dropped the data argument from INIT_WORK. */
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
#else
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
#endif

    return VINF_SUCCESS;
}
1042
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette