VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@ 20345

Last change on this file since 20345 was 20345, checked in by vboxsync, 16 years ago

#3783: Include missing linux/ip.h.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 38.6 KB
Line 
1/* $Id: VBoxNetFlt-linux.c 20345 2009-06-05 15:58:07Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#include "the-linux-kernel.h"
26#include "version-generated.h"
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/rtnetlink.h>
30#include <linux/miscdevice.h>
31#include <linux/ip.h>
32
33#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
34#include <VBox/log.h>
35#include <VBox/err.h>
36#include <iprt/alloca.h>
37#include <iprt/assert.h>
38#include <iprt/spinlock.h>
39#include <iprt/semaphore.h>
40#include <iprt/initterm.h>
41#include <iprt/process.h>
42#include <iprt/mem.h>
43#include <iprt/log.h>
44#include <iprt/mp.h>
45#include <iprt/mem.h>
46#include <iprt/time.h>
47
48#define VBOXNETFLT_OS_SPECFIC 1
49#include "../VBoxNetFltInternal.h"
50
/** Maps a pointer to the embedded notifier_block back to the owning instance. */
#define VBOX_FLT_NB_TO_INST(pNB) ((PVBOXNETFLTINS)((uint8_t *)pNB - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.Notifier)))
/** Maps a pointer to the embedded packet_type back to the owning instance. */
#define VBOX_FLT_PT_TO_INST(pPT) ((PVBOXNETFLTINS)((uint8_t *)pPT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.PacketType)))
/** Maps a pointer to the embedded xmit work item back to the owning instance. */
#define VBOX_FLT_XT_TO_INST(pXT) ((PVBOXNETFLTINS)((uint8_t *)pXT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.XmitTask)))

/** Reads the device's promiscuity reference count. */
#define VBOX_GET_PCOUNT(pDev) (pDev->promiscuity)

/* The skb header accessor API changed in 2.6.22; fall back to the raw
   pointer fields on older kernels. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
# define VBOX_SKB_RESET_MAC_HDR(skb)     skb_reset_mac_header(skb)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
# define VBOX_SKB_RESET_MAC_HDR(skb)     skb->mac.raw = skb->data
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */

/* skb_checksum_help() changed its signature and return convention across
   kernel versions; VBOX_SKB_CHECKSUM_HELP normalizes it so that a non-zero
   result always means failure. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
# define CHECKSUM_PARTIAL CHECKSUM_HW
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
#  define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
#   define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
#  else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
    /* Pre-2.6.7 used an inverted return convention, hence the '!'. */
#   define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
#  endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */

/* GSO appeared in 2.6.18; before that, treat every skb as non-GSO. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
# define VBOX_SKB_IS_GSO(skb) skb_is_gso(skb)
  /* No features, very dumb device */
# define VBOX_SKB_GSO_SEGMENT(skb) skb_gso_segment(skb, 0)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
# define VBOX_SKB_IS_GSO(skb) false
# define VBOX_SKB_GSO_SEGMENT(skb) NULL
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* Padding used by dev_alloc_skb() callers to 16-byte align the IP header. */
#ifndef NET_IP_ALIGN
# define NET_IP_ALIGN 2
#endif
95
96#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12)
97unsigned dev_get_flags(const struct net_device *dev)
98{
99 unsigned flags;
100
101 flags = (dev->flags & ~(IFF_PROMISC |
102 IFF_ALLMULTI |
103 IFF_RUNNING)) |
104 (dev->gflags & (IFF_PROMISC |
105 IFF_ALLMULTI));
106
107 if (netif_running(dev) && netif_carrier_ok(dev))
108 flags |= IFF_RUNNING;
109
110 return flags;
111}
112#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
113
114/*******************************************************************************
115* Internal Functions *
116*******************************************************************************/
117static int VBoxNetFltLinuxInit(void);
118static void VBoxNetFltLinuxUnload(void);
119
120
121/*******************************************************************************
122* Global Variables *
123*******************************************************************************/
/**
 * The (common) global data.
 */
#ifdef RT_ARCH_AMD64
/**
 * Memory for the executable memory heap (in IPRT).
 *
 * Module .bss is not executable, so a dedicated writable+executable
 * section is reserved via inline assembly below and donated to IPRT
 * (RTR0MemExecDonate) during VBoxNetFltLinuxInit.
 */
extern uint8_t g_abExecMemory[4096]; /* cannot donate less than one page */
__asm__(".section execmemory, \"awx\", @progbits\n\t"
        ".align 32\n\t"
        ".globl g_abExecMemory\n"
        "g_abExecMemory:\n\t"
        ".zero 4096\n\t"
        ".type g_abExecMemory, @object\n\t"
        ".size g_abExecMemory, 4096\n\t"
        ".text\n\t");
#endif

/** Instance of the common global data; initialized in VBoxNetFltLinuxInit. */
static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
143
144module_init(VBoxNetFltLinuxInit);
145module_exit(VBoxNetFltLinuxUnload);
146
147MODULE_AUTHOR("Sun Microsystems, Inc.");
148MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
149MODULE_LICENSE("GPL");
150#ifdef MODULE_VERSION
151# define xstr(s) str(s)
152# define str(s) #s
153MODULE_VERSION(VBOX_VERSION_STRING " (" xstr(INTNETTRUNKIFPORT_VERSION) ")");
154#endif
155
/* Note: g_VBoxNetFltGlobals is defined once further up (before module_init);
   a redundant duplicate definition that used to live here has been removed. */
160
/**
 * Initialize module.
 *
 * Brings up IPRT, donates exec memory on AMD64, then initializes the
 * common globals and connects to the support driver (IDC).
 *
 * @returns 0 on success, a negative errno on failure.
 */
static int __init VBoxNetFltLinuxInit(void)
{
    int rc;
    /*
     * Initialize IPRT.
     */
    rc = RTR0Init(0);
    if (RT_SUCCESS(rc))
    {
#ifdef RT_ARCH_AMD64
        /* Hand the writable+executable section (see g_abExecMemory above)
           over to the IPRT exec heap; logging depends on it on AMD64. */
        rc = RTR0MemExecDonate(&g_abExecMemory[0], sizeof(g_abExecMemory));
        printk("VBoxNetFlt: dbg - g_abExecMemory=%p\n", (void *)&g_abExecMemory[0]);
        if (RT_FAILURE(rc))
        {
            printk("VBoxNetFlt: failed to donate exec memory, no logging will be available.\n");
        }
#endif
        Log(("VBoxNetFltLinuxInit\n"));

        /*
         * Initialize the globals and connect to the support driver.
         *
         * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
         * for establishing the connect to the support driver.
         */
        memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
        rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
        if (RT_SUCCESS(rc))
        {
            LogRel(("VBoxNetFlt: Successfully started.\n"));
            return 0;
        }
        else
            LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
        RTR0Term();
    }
    else
        LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));

    /* Failure path: scrub the globals and translate to a negative errno. */
    memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
    return -RTErrConvertToErrno(rc);
}
208
209
/**
 * Unload the module.
 *
 * Disconnects from the support driver, tears down the globals and
 * terminates IPRT — the reverse of VBoxNetFltLinuxInit.
 *
 * @todo We have to prevent this if we're busy!
 */
static void __exit VBoxNetFltLinuxUnload(void)
{
    int rc;
    Log(("VBoxNetFltLinuxUnload\n"));
    Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));

    /*
     * Undo the work done during start (in reverse order).
     */
    rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
    AssertRC(rc); NOREF(rc);

    RTR0Term();

    /* Scrub the globals so stale pointers cannot be used by accident. */
    memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));

    Log(("VBoxNetFltLinuxUnload - done\n"));
}
233
234
/**
 * Reads and retains the host interface handle.
 *
 * @returns The handle, NULL if detached.
 * @param   pThis   The instance.
 *
 * @note    The careful variant that takes the spinlock, checks
 *          fDisconnectedFromHost and dev_hold()s the device is disabled
 *          (#if 0); the active code is a plain unordered atomic read of
 *          u.s.pDev without taking an extra device reference.
 */
DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
{
#if 0
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    struct net_device *pDev = NULL;

    Log(("vboxNetFltLinuxRetainNetDev\n"));
    /*
     * Be careful here to avoid problems racing the detached callback.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
    {
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev)
        {
            dev_hold(pDev);
            Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        }
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxRetainNetDev - done\n"));
    return pDev;
#else
    return (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
#endif
}
269
270
/**
 * Release the host interface handle previously retained
 * by vboxNetFltLinuxRetainNetDev.
 *
 * @param   pThis   The instance.
 * @param   pDev    The vboxNetFltLinuxRetainNetDev
 *                  return value, NULL is fine.
 *
 * @note    Currently a no-op: the entire body is disabled (#if 0) to mirror
 *          the disabled dev_hold() in vboxNetFltLinuxRetainNetDev.
 */
DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
#if 0
    Log(("vboxNetFltLinuxReleaseNetDev\n"));
    NOREF(pThis);
    if (pDev)
    {
        dev_put(pDev);
        Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }
    Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
#endif
}
292
/** Tag for buffers we inject: constant magic in the top 16 bits, the
 *  device's ifindex in the bottom 16 bits. */
#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
/** Accesses the last 4 bytes of the sk_buff control buffer (cb), where the
 *  tag is stored for buffers created by vboxNetFltLinuxSkBufFromSG. */
#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))

/**
 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
 *
 * @returns true / false accordingly.
 * @param   pBuf    The sk_buff.
 */
DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
{
    return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
}
307
308
/**
 * Internal worker that creates a linux sk_buff for a
 * (scatter/)gather list.
 *
 * Only single-segment SGs are supported; multi-segment and empty SGs are
 * dropped with a release-log entry.
 *
 * @returns Pointer to the sk_buff, NULL on failure or unsupported input.
 * @param   pThis       The instance.
 * @param   pSG         The (scatter/)gather list.
 * @param   fDstWire    Set if the buffer is headed for the wire, in which
 *                      case the ethernet header is pushed back on and the
 *                      mac/network header offsets are (re)set.
 */
static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
{
    struct sk_buff *pPkt;
    struct net_device *pDev;
    /*
     * Because we're lazy, we will ASSUME that all SGs coming from INTNET
     * will only contain one single segment.
     */
    if (pSG->cSegsUsed != 1 || pSG->cbTotal != pSG->aSegs[0].cb)
    {
        LogRel(("VBoxNetFlt: Dropped multi-segment(%d) packet coming from internal network.\n", pSG->cSegsUsed));
        return NULL;
    }
    if (pSG->cbTotal == 0)
    {
        LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
        return NULL;
    }

    /*
     * Allocate a packet and copy over the data.
     *
     * NOTE(review): pDev is an unordered read of u.s.pDev and is not checked
     * for NULL before eth_type_trans() below — presumably callers hold the
     * device alive via vboxNetFltLinuxRetainNetDev; verify.
     */
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
    if (pPkt)
    {
        pPkt->dev = pDev;
        /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
        skb_reserve(pPkt, NET_IP_ALIGN);
        skb_put(pPkt, pSG->cbTotal);
        memcpy(pPkt->data, pSG->aSegs[0].pv, pSG->cbTotal);
        /* Set protocol and packet_type fields. */
        pPkt->protocol = eth_type_trans(pPkt, pDev);
        pPkt->ip_summed = CHECKSUM_NONE;
        if (fDstWire)
        {
            VBOX_SKB_RESET_NETWORK_HDR(pPkt);
            /* Restore ethernet header back. */
            skb_push(pPkt, ETH_HLEN);
            VBOX_SKB_RESET_MAC_HDR(pPkt);
        }
        /* Tag the buffer so vboxNetFltLinuxSkBufIsOur recognizes it. */
        VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);

        return pPkt;
    }
    else
        Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
    pSG->pvUserData = NULL;

    return NULL;
}
369
370
/**
 * Initializes a SG list from an sk_buff.
 *
 * @param   pThis   The instance.
 * @param   pBuf    The sk_buff.
 * @param   pSG     The SG.
 * @param   cSegs   The number of segments allocated for the SG.
 *                  This should match the number returned by
 *                  vboxNetFltLinuxSGSegments() exactly!
 * @param   fSrc    The source of the frame (INTNETTRUNKDIR_*).
 */
DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
{
    int i;
    NOREF(pThis);

    Assert(!skb_shinfo(pBuf)->frag_list);
    pSG->pvOwnerData = NULL;
    pSG->pvUserData = NULL;
    pSG->pvUserData2 = NULL;
    pSG->cUsers = 1;
    pSG->fFlags = INTNETSG_FLAGS_TEMP;
    pSG->cSegsAlloc = cSegs;

    if (fSrc & INTNETTRUNKDIR_WIRE)
    {
        /*
         * The packet came from wire, ethernet header was removed by device driver.
         * Restore it.
         */
        skb_push(pBuf, ETH_HLEN);
    }
    pSG->cbTotal = pBuf->len;
#ifdef VBOXNETFLT_SG_SUPPORT
    /* Segment 0 is the linear part; each page fragment becomes one segment,
       kmap()ed here and kunmap()ed later by vboxNetFltLinuxFreeSkBuff. */
    pSG->aSegs[0].cb = skb_headlen(pBuf);
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;

    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
        pSG->aSegs[i+1].cb = pFrag->size;
        pSG->aSegs[i+1].pv = kmap(pFrag->page);
        /* NOTE(review): unconditional printk looks like leftover debug output. */
        printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
        pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
    }
    /* i ends at nr_frags; pre-increment yields nr_frags + 1 used segments
       and leaves i pointing at the next free slot for the padding below. */
    pSG->cSegsUsed = ++i;
#else
    pSG->aSegs[0].cb = pBuf->len;
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;
    pSG->cSegsUsed = i = 1;
#endif


#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     *
     * Since we're getting to the packet before it is framed, it has not
     * yet been padded. The current solution is to add a segment pointing
     * to a buffer containing all zeros and pray that works for all frames...
     */
    if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
    {
        static uint8_t const s_abZero[128] = {0};

        AssertReturnVoid(i < cSegs);

        pSG->aSegs[i].Phys = NIL_RTHCPHYS;
        pSG->aSegs[i].pv = (void *)&s_abZero[0];
        pSG->aSegs[i].cb = 60 - pSG->cbTotal;
        pSG->cbTotal = 60;
        pSG->cSegsUsed++;
    }
#endif
    Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
          pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
    for (i = 0; i < pSG->cSegsUsed; i++)
        Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
              i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
}
454
455/**
456 * Packet handler,
457 *
458 * @returns 0 or EJUSTRETURN.
459 * @param pThis The instance.
460 * @param pMBuf The mbuf.
461 * @param pvFrame The start of the frame, optional.
462 * @param fSrc Where the packet (allegedly) comes from, one INTNETTRUNKDIR_* value.
463 * @param eProtocol The protocol.
464 */
465#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
466static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
467 struct net_device *pSkbDev,
468 struct packet_type *pPacketType,
469 struct net_device *pOrigDev)
470#else
471static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
472 struct net_device *pSkbDev,
473 struct packet_type *pPacketType)
474#endif
475{
476 PVBOXNETFLTINS pThis;
477 struct net_device *pDev;
478 LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
479 pBuf, pSkbDev, pPacketType));
480#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
481 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
482 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
483#else
484 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
485 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
486#endif
487 /*
488 * Drop it immediately?
489 */
490 if (!pBuf)
491 return 0;
492
493 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
494 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
495 if (pThis->u.s.pDev != pSkbDev)
496 {
497 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
498 return 0;
499 }
500
501 Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
502 if (vboxNetFltLinuxSkBufIsOur(pBuf))
503 {
504 Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
505 dev_kfree_skb(pBuf);
506 return 0;
507 }
508
509#ifndef VBOXNETFLT_SG_SUPPORT
510 {
511 /*
512 * Get rid of fragmented packets, they cause too much trouble.
513 */
514 struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
515 kfree_skb(pBuf);
516 if (!pCopy)
517 {
518 LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
519 return 0;
520 }
521 pBuf = pCopy;
522#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
523 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
524 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
525#else
526 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
527 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
528#endif
529 }
530#endif
531
532 /* Add the packet to transmit queue and schedule the bottom half. */
533 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
534 schedule_work(&pThis->u.s.XmitTask);
535 Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
536 &pThis->u.s.XmitTask, pBuf));
537 /* It does not really matter what we return, it is ignored by the kernel. */
538 return 0;
539}
540
541static unsigned vboxNetFltLinuxSGSegments(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
542{
543#ifdef VBOXNETFLT_SG_SUPPORT
544 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
545#else
546 unsigned cSegs = 1;
547#endif
548#ifdef PADD_RUNT_FRAMES_FROM_HOST
549 /*
550 * Add a trailer if the frame is too small.
551 */
552 if (pBuf->len < 60)
553 cSegs++;
554#endif
555 return cSegs;
556}
557
/* WARNING! This function should only be called after vboxNetFltLinuxSkBufToSG()! */
/**
 * Frees an sk_buff and undoes the fragment kmap()s performed by
 * vboxNetFltLinuxSkBufToSG.
 *
 * @param   pBuf    The sk_buff to free.
 * @param   pSG     The SG previously filled in from pBuf (holds the mapped
 *                  fragment addresses in aSegs[1..]).
 */
static void vboxNetFltLinuxFreeSkBuff(struct sk_buff *pBuf, PINTNETSG pSG)
{
#ifdef VBOXNETFLT_SG_SUPPORT
    int i;

    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        /* NOTE(review): unconditional printk looks like leftover debug output. */
        printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
        /* NOTE(review): kunmap() conventionally takes the struct page, not the
           mapped address — verify against the kmap() in vboxNetFltLinuxSkBufToSG. */
        kunmap(pSG->aSegs[i+1].pv);
    }
#endif

    dev_kfree_skb(pBuf);
}
573
#ifndef LOG_ENABLED
/* Compiled out entirely when logging is disabled. */
#define vboxNetFltDumpPacket(a, b, c, d)
#else
/**
 * Logs a one-line summary (MACs, direction, size, running packet number) of a
 * packet plus a hex dump of its first segment.
 *
 * @param   pSG         The SG describing the packet (segment 0 holds the
 *                      ethernet header).
 * @param   fEgress     true = leaving the internal network, false = entering.
 * @param   pszWhere    Short tag for the log line ("host"/"wire").
 * @param   iIncrement  Added to the static packet counter (0 to reuse the
 *                      previous number, e.g. for a second copy of a packet).
 */
static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
{
    uint8_t *pInt, *pExt;
    static int iPacketNo = 1;
    iPacketNo += iIncrement;
    /* On egress the destination MAC (offset 0) is the external side; on
       ingress it is the internal side. The source MAC follows at offset 6. */
    if (fEgress)
    {
        pExt = pSG->aSegs[0].pv;
        pInt = pExt + 6;
    }
    else
    {
        pInt = pSG->aSegs[0].pv;
        pExt = pInt + 6;
    }
    Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
         " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
         pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
         fEgress ? "-->" : "<--", pszWhere,
         pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
         pSG->cbTotal, iPacketNo));
    Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
}
#endif
601
602static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
603{
604 unsigned cSegs = vboxNetFltLinuxSGSegments(pThis, pBuf);
605 if (cSegs < MAX_SKB_FRAGS)
606 {
607 uint8_t *pTmp;
608 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
609 if (!pSG)
610 {
611 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
612 return VERR_NO_MEMORY;
613 }
614 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc);
615
616 pTmp = pSG->aSegs[0].pv;
617 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
618 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
619 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
620 vboxNetFltLinuxFreeSkBuff(pBuf, pSG);
621 }
622
623 return VINF_SUCCESS;
624}
625
626static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
627{
628 uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;
629
630 if (VBOX_SKB_IS_GSO(pBuf))
631 {
632 /* Need to segment the packet */
633 struct sk_buff *pNext, *pSegment;
634#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
635 Log3(("vboxNetFltLinuxForwardToIntNet: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x ip_summed=%d\n",
636 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, pBuf->ip_summed));
637#endif
638
639 pSegment = VBOX_SKB_GSO_SEGMENT(pBuf);
640 if (IS_ERR(pSegment))
641 {
642 dev_kfree_skb(pBuf);
643 LogRel(("VBoxNetFlt: Failed to segment a packet (%d).\n", PTR_ERR(pBuf)));
644 return;
645 }
646 for (; pSegment; pSegment = pNext)
647 {
648#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
649 Log3(("vboxNetFltLinuxForwardToIntNet: segment len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
650 pSegment->len, pSegment->data_len, pSegment->truesize, pSegment->next, skb_shinfo(pSegment)->nr_frags, skb_shinfo(pSegment)->gso_size, skb_shinfo(pSegment)->gso_segs, skb_shinfo(pSegment)->gso_type, skb_shinfo(pSegment)->frag_list, pSegment->pkt_type));
651#endif
652 pNext = pSegment->next;
653 pSegment->next = 0;
654 vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
655 }
656 dev_kfree_skb(pBuf);
657 }
658 else
659 {
660 if (pBuf->ip_summed == CHECKSUM_PARTIAL && pBuf->pkt_type == PACKET_OUTGOING)
661 {
662#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
663 /*
664 * Try to work around the problem with CentOS 5.2 (2.6.18 kernel),
665 * it passes wrong 'h' pointer down. We take IP header length from
666 * the header itself and reconstruct 'h' pointer to TCP (or whatever)
667 * header.
668 */
669 unsigned char *tmp = pBuf->h.raw;
670 if (pBuf->h.raw == pBuf->nh.raw && pBuf->protocol == htons(ETH_P_IP))
671 pBuf->h.raw = pBuf->nh.raw + pBuf->nh.iph->ihl * 4;
672#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) */
673 if (VBOX_SKB_CHECKSUM_HELP(pBuf))
674 {
675 LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
676 dev_kfree_skb(pBuf);
677 return;
678 }
679#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
680 /* Restore the original (wrong) pointer. */
681 pBuf->h.raw = tmp;
682#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) */
683 }
684 vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
685 }
686 /*
687 * Create a (scatter/)gather list for the sk_buff and feed it to the internal network.
688 */
689}
690
/**
 * Bottom half scheduled by vboxNetFltLinuxPacketHandler: drains the
 * instance's transmit queue into the internal network.
 *
 * The instance is busy-retained for the duration of the drain; if the
 * instance is no longer active the work item simply returns.
 *
 * @param   pWork   The work item, embedded in VBOXNETFLTINS (u.s.XmitTask).
 *                  (Pre-2.6.20 work queues pass it as a void pointer.)
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
#else
static void vboxNetFltLinuxXmitTask(void *pWork)
#endif
{
    struct sk_buff *pBuf;
    bool fActive;
    PVBOXNETFLTINS pThis;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
    pThis = VBOX_FLT_XT_TO_INST(pWork);
    /*
     * Active? Retain the instance and increment the busy counter.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    fActive = ASMAtomicUoReadBool(&pThis->fActive);
    if (fActive)
        vboxNetFltRetain(pThis, true /* fBusy */);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (!fActive)
        return;

    /* Drain the queue; each buffer is consumed by the forwarding code. */
    while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != 0)
        vboxNetFltLinuxForwardToIntNet(pThis, pBuf);

    vboxNetFltRelease(pThis, true /* fBusy */);
}
720
/**
 * Internal worker for vboxNetFltOsInitInstance and vboxNetFltOsMaybeRediscovered.
 *
 * Retains the device, publishes it in the instance data, caches its MAC
 * address and installs our ETH_P_ALL packet handler on it.
 *
 * @returns VBox status code.
 * @param   pThis   The instance.
 * @param   pDev    The host net_device to attach to; NULL yields
 *                  VERR_INTNET_FLT_IF_NOT_FOUND.
 */
static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    struct packet_type *pt;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));

    if (!pDev)
    {
        Log(("VBoxNetFlt: failed to find device '%s'\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    /* Take a device reference and publish the pointer under the spinlock. */
    dev_hold(pDev);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, pDev);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
    /*
     * Get the mac address while we still have a valid ifnet reference.
     */
    memcpy(&pThis->u.s.Mac, pDev->dev_addr, sizeof(pThis->u.s.Mac));

    /* Capture all protocols on this device. */
    pt = &pThis->u.s.PacketType;
    pt->type = __constant_htons(ETH_P_ALL);
    pt->dev = pDev;
    pt->func = vboxNetFltLinuxPacketHandler;
    dev_add_pack(pt);
    /* Re-check under the lock that the device was not yanked meanwhile;
       on success mark the instance registered and connected. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pDev)
    {
        ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
        ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
        pDev = NULL; /* don't dereference it */
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));

    /* Release the interface on failure. */
    if (pDev)
    {
        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
        ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
        dev_put(pDev);
        Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }

    LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
    return VINF_SUCCESS;
}
783
784
/**
 * Worker for the NETDEV_UNREGISTER notifier event: detaches the instance
 * from the device, removes our packet handler and drops the device reference
 * taken in vboxNetFltLinuxAttachToInterface.
 *
 * @returns NOTIFY_OK.
 * @param   pThis   The instance.
 * @param   pDev    The device being unregistered.
 */
static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Assert(!pThis->fDisconnectedFromHost);
    /* Clear the published state under the spinlock first so concurrent
       readers see a consistent "disconnected" view. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
    ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    dev_remove_pack(&pThis->u.s.PacketType);
    /* Drop any captured-but-unprocessed packets. */
    skb_queue_purge(&pThis->u.s.XmitQueue);
    Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
    Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    dev_put(pDev);

    return NOTIFY_OK;
}
804
805static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
806{
807 /* Check if we are not suspended and promiscuous mode has not been set. */
808 if (ASMAtomicUoReadBool(&pThis->fActive) && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
809 {
810 /* Note that there is no need for locking as the kernel got hold of the lock already. */
811 dev_set_promiscuity(pDev, 1);
812 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
813 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
814 }
815 else
816 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
817 return NOTIFY_OK;
818}
819
820static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
821{
822 /* Undo promiscuous mode if we has set it. */
823 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
824 {
825 /* Note that there is no need for locking as the kernel got hold of the lock already. */
826 dev_set_promiscuity(pDev, -1);
827 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
828 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
829 }
830 else
831 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
832 return NOTIFY_OK;
833}
834
/**
 * Netdevice notifier callback (registered via u.s.Notifier).
 *
 * On NETDEV_REGISTER for the device we are configured for (name match) the
 * instance attaches; other events are only acted upon if they concern the
 * device we are currently attached to.
 *
 * @returns NOTIFY_OK (or the worker's result for handled events).
 * @param   self        The embedded notifier_block, used to find the instance.
 * @param   ulEventType The NETDEV_* event.
 * @param   ptr         The affected struct net_device.
 */
static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)

{
    int rc = NOTIFY_OK;
#ifdef DEBUG
    char *pszEvent = "<unknown>";
#endif
    struct net_device *pDev = (struct net_device *)ptr;
    PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);

#ifdef DEBUG
    /* Pretty-print the event for the debug log. */
    switch (ulEventType)
    {
        case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
        case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
        case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
        case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
        case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
        case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
        case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
        case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
        case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
        case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
    }
    Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
         pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
#endif
    if (ulEventType == NETDEV_REGISTER && !strcmp(pDev->name, pThis->szName))
    {
        vboxNetFltLinuxAttachToInterface(pThis, pDev);
    }
    else
    {
        /* Ignore events for devices we are not attached to. */
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev != ptr)
            return NOTIFY_OK;
        rc = NOTIFY_OK;
        switch (ulEventType)
        {
            case NETDEV_UNREGISTER:
                rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
                break;
            case NETDEV_UP:
                rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
                break;
            case NETDEV_GOING_DOWN:
                rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
                break;
            case NETDEV_CHANGENAME:
                break;
        }
    }

    return rc;
}
890
891bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
892{
893 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
894}
895
/**
 * Transmits a frame from the internal network to the wire and/or the host
 * network stack.
 *
 * A fresh sk_buff is built from the SG for each requested destination:
 * dev_queue_xmit() for the wire, netif_rx_ni() for the host stack.
 *
 * @returns VBox status code; the last error wins if both directions fail.
 * @param   pThis   The instance.
 * @param   pSG     The (single-segment) frame to send.
 * @param   fDst    INTNETTRUNKDIR_WIRE and/or INTNETTRUNKDIR_HOST.
 */
int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, PINTNETSG pSG, uint32_t fDst)
{
    struct net_device * pDev;
    int err;
    int rc = VINF_SUCCESS;

    LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * Create a sk_buff for the gather list and push it onto the wire.
         */
        if (fDst & INTNETTRUNKDIR_WIRE)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
            if (pBuf)
            {
                vboxNetFltDumpPacket(pSG, true, "wire", 1);
                Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
                Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
                err = dev_queue_xmit(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        /*
         * Create a sk_buff for the gather list and push it onto the host stack.
         */
        if (fDst & INTNETTRUNKDIR_HOST)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
            if (pBuf)
            {
                /* iIncrement = 0 when the wire copy above already numbered this packet. */
                vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
                Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
                Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
                err = netif_rx_ni(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }

    return rc;
}
950
951
952bool vboxNetFltPortOsIsPromiscuous(PVBOXNETFLTINS pThis)
953{
954 bool fRc = false;
955 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
956 if (pDev)
957 {
958 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
959 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
960 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
961 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
962 }
963 return fRc;
964}
965
966
967void vboxNetFltPortOsGetMacAddress(PVBOXNETFLTINS pThis, PRTMAC pMac)
968{
969 *pMac = pThis->u.s.Mac;
970}
971
972
973bool vboxNetFltPortOsIsHostMac(PVBOXNETFLTINS pThis, PCRTMAC pMac)
974{
975 /* ASSUMES that the MAC address never changes. */
976 return pThis->u.s.Mac.au16[0] == pMac->au16[0]
977 && pThis->u.s.Mac.au16[1] == pMac->au16[1]
978 && pThis->u.s.Mac.au16[2] == pMac->au16[2];
979}
980
981
/**
 * Activates or deactivates the filter by toggling promiscuous mode on the
 * host interface (unless promiscuous handling is disabled for this instance).
 *
 * @param   pThis   The instance.
 * @param   fActive true to enable promiscuous mode, false to restore it.
 */
void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
{
    struct net_device * pDev;

    LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
             pThis, pThis->szName, fActive?"true":"false",
             pThis->fDisablePromiscuous?"true":"false"));

    /* Nothing to do if the user asked us not to touch promiscuity. */
    if (pThis->fDisablePromiscuous)
        return;

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * This API is a bit weird, the best reference is the code.
         *
         * Also, we have a bit of a race condition wrt the maintenance of the
         * host interface promiscuity as seen by vboxNetFltPortOsIsPromiscuous:
         * fPromiscuousSet and the device's counter are not updated atomically.
         */
#ifdef LOG_ENABLED
        u_int16_t fIf;
        unsigned const cPromiscBefore = VBOX_GET_PCOUNT(pDev);
#endif
        if (fActive)
        {
            Assert(!pThis->u.s.fPromiscuousSet);

            /* dev_set_promiscuity must be called under the RTNL lock. */
            rtnl_lock();
            dev_set_promiscuity(pDev, 1);
            rtnl_unlock();
            pThis->u.s.fPromiscuousSet = true;
            Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
        }
        else
        {
            /* Only drop the reference we ourselves added earlier. */
            if (pThis->u.s.fPromiscuousSet)
            {
                rtnl_lock();
                dev_set_promiscuity(pDev, -1);
                rtnl_unlock();
                Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
            }
            pThis->u.s.fPromiscuousSet = false;

#ifdef LOG_ENABLED
            fIf = dev_get_flags(pDev);
            Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
#endif
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
}
1036
1037
/**
 * Disconnect-from-internal-network callback; the Linux backend keeps no state
 * tied to the connection, so this is a no-op.
 *
 * @returns VINF_SUCCESS.
 * @param   pThis   The instance.
 */
int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}
1043
1044
/**
 * Connect-to-internal-network callback; the Linux backend needs no extra
 * setup here, so this is a no-op.
 *
 * @returns VINF_SUCCESS.
 * @param   pThis   The instance.
 */
int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}
1050
1051
/**
 * Tears down the Linux specific parts of an instance: removes the packet
 * handler, purges queued buffers, drops the device reference, unregisters the
 * netdevice notifier and releases the module reference taken at init.
 *
 * @param   pThis   The instance being deleted.
 */
void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
{
    struct net_device *pDev;
    bool fRegistered;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    /* Snapshot pDev and fRegistered consistently under the spinlock. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (fRegistered)
    {
        /* NOTE(review): assumes pDev is non-NULL whenever fRegistered is set
           (both are maintained together by the attach/unregister paths). */
        dev_remove_pack(&pThis->u.s.PacketType);
        skb_queue_purge(&pThis->u.s.XmitQueue);
        Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
        Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        dev_put(pDev);                      /* drops the reference taken at attach */
    }
    Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
    unregister_netdevice_notifier(&pThis->u.s.Notifier);
    module_put(THIS_MODULE);                /* pairs with try_module_get in vboxNetFltOsInitInstance */
}
1074
1075
/**
 * Initializes the Linux specific parts of an instance by registering a
 * netdevice notifier and pinning the module.
 *
 * Registering the notifier causes the kernel to replay register/up events for
 * existing devices, which is presumably what attaches us to the named
 * interface and sets fRegistered before register_netdevice_notifier returns
 * (TODO confirm against vboxNetFltLinuxNotifierCallback / attach path).
 *
 * @returns VINF_SUCCESS on success, VERR_INTNET_FLT_IF_NOT_FOUND if the named
 *          interface does not exist, VERR_INTNET_FLT_IF_FAILED otherwise.
 * @param   pThis       The instance.
 * @param   pvContext   Unused.
 */
int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
{
    int err;
    NOREF(pvContext);

    pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
    err = register_netdevice_notifier(&pThis->u.s.Notifier);
    if (err)
        return VERR_INTNET_FLT_IF_FAILED;
    if (!pThis->u.s.fRegistered)
    {
        /* The interface wasn't found during notifier replay; back out. */
        unregister_netdevice_notifier(&pThis->u.s.Notifier);
        LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
    /* NOTE(review): on this failure path the notifier stays registered and no
       module reference is held; looks like a leak unless the caller invokes
       vboxNetFltOsDeleteInstance on failure — verify against the caller. */
    if (   pThis->fDisconnectedFromHost
        || !try_module_get(THIS_MODULE))
        return VERR_INTNET_FLT_IF_FAILED;

    return VINF_SUCCESS;
}
1099
1100int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
1101{
1102 /*
1103 * Init the linux specific members.
1104 */
1105 pThis->u.s.pDev = NULL;
1106 pThis->u.s.fRegistered = false;
1107 pThis->u.s.fPromiscuousSet = false;
1108 memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
1109 skb_queue_head_init(&pThis->u.s.XmitQueue);
1110#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1111 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
1112#else
1113 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
1114#endif
1115
1116 return VINF_SUCCESS;
1117}
1118
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette