VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@ 38054

Last change on this file since 38054 was 38054, checked in by vboxsync, 14 years ago

netflt: fix for regression introduced in r72926

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 87.7 KB
1/* $Id: VBoxNetFlt-linux.c 38054 2011-07-19 08:50:19Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
22#define VBOXNETFLT_LINUX_NO_XMIT_QUEUE
23#include "the-linux-kernel.h"
24#include "version-generated.h"
25#include "product-generated.h"
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/rtnetlink.h>
29#include <linux/miscdevice.h>
30#include <linux/ip.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <VBox/intnetinline.h>
35#include <VBox/vmm/pdmnetinline.h>
36#include <VBox/param.h>
37#include <iprt/alloca.h>
38#include <iprt/assert.h>
39#include <iprt/spinlock.h>
40#include <iprt/semaphore.h>
41#include <iprt/initterm.h>
42#include <iprt/process.h>
43#include <iprt/mem.h>
44#include <iprt/net.h>
45#include <iprt/log.h>
46#include <iprt/mp.h>
47#include <iprt/mem.h>
48#include <iprt/time.h>
49
50#define VBOXNETFLT_OS_SPECFIC 1
51#include "../VBoxNetFltInternal.h"
52
53#define VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
54#ifdef CONFIG_NET_SCHED
55/*# define VBOXNETFLT_WITH_QDISC */ /* Uncomment the define to enable qdisc support. */
56# ifdef VBOXNETFLT_WITH_QDISC
57# include <net/pkt_sched.h>
58# endif /* VBOXNETFLT_WITH_QDISC */
59#endif
60
61
62/*******************************************************************************
63* Defined Constants And Macros *
64*******************************************************************************/
65#define VBOX_FLT_NB_TO_INST(pNB) RT_FROM_MEMBER(pNB, VBOXNETFLTINS, u.s.Notifier)
66#define VBOX_FLT_PT_TO_INST(pPT) RT_FROM_MEMBER(pPT, VBOXNETFLTINS, u.s.PacketType)
67#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
68# define VBOX_FLT_XT_TO_INST(pXT) RT_FROM_MEMBER(pXT, VBOXNETFLTINS, u.s.XmitTask)
69#endif
70
71#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
72# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
73# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
74#else
75# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
76# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
77#endif
78
79#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
80# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
81#else
82# define CHECKSUM_PARTIAL CHECKSUM_HW
83# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
84# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
85# else
86# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
87# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
88# else
89# define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
90# endif
91/* Versions prior to 2.6.10 use stats for both bstats and qstats */
92# define bstats stats
93# define qstats stats
94# endif
95#endif
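/*
 * Editorial note (sketch, not taken from this hunk): the wrappers above only
 * paper over the differing skb_checksum_help() signatures across kernel
 * versions, presumably so that a non-zero result can be treated as failure
 * everywhere.  A typical call pattern would look roughly like:
 *
 *     if (pBuf->ip_summed == CHECKSUM_PARTIAL && VBOX_SKB_CHECKSUM_HELP(pBuf))
 *         ...could not complete the checksum in software, drop the frame...
 */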
96
97#ifdef VBOXNETFLT_WITH_QDISC
98# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)
99static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
100{
101 kfree_skb(skb);
102 sch->stats.drops++;
103
104 return NET_XMIT_DROP;
105}
106# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) */
107#endif /* VBOXNETFLT_WITH_QDISC */
108
109#ifndef NET_IP_ALIGN
110# define NET_IP_ALIGN 2
111#endif
112
113#if 0
114/** Create scatter / gather segments for fragments. When not used, we will
115 * linearize the socket buffer before creating the internal networking SG. */
116# define VBOXNETFLT_SG_SUPPORT 1
117#endif
118
119#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
120/** Indicates that the linux kernel may send us GSO frames. */
121# define VBOXNETFLT_WITH_GSO 1
122
123/** This enables or disables the transmission of GSO frames from the internal
124 * network to the host. */
125# define VBOXNETFLT_WITH_GSO_XMIT_HOST 1
126
127# if 0 /** @todo This is currently disabled because it causes a performance loss of 5-10%. */
128/** This enables or disables the transmission of GSO frames from the internal
129 * network to the wire. */
130# define VBOXNETFLT_WITH_GSO_XMIT_WIRE 1
131# endif
132
133/** This enables or disables the forwarding/flooding of GSO frames from the host
134 * to the internal network. */
135# define VBOXNETFLT_WITH_GSO_RECV 1
136
137#endif
138
139#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
140/** This enables or disables handling of GSO frames coming from the wire (GRO). */
141# define VBOXNETFLT_WITH_GRO 1
142#endif
143/*
144 * GRO support was backported to RHEL 5.4
145 */
146#ifdef RHEL_RELEASE_CODE
147# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)
148# define VBOXNETFLT_WITH_GRO 1
149# endif
150#endif
151
152/*******************************************************************************
153* Internal Functions *
154*******************************************************************************/
155static int VBoxNetFltLinuxInit(void);
156static void VBoxNetFltLinuxUnload(void);
157static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf);
158
159
160/*******************************************************************************
161* Global Variables *
162*******************************************************************************/
163/**
164 * The (common) global data.
165 */
166static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
167
168module_init(VBoxNetFltLinuxInit);
169module_exit(VBoxNetFltLinuxUnload);
170
171MODULE_AUTHOR(VBOX_VENDOR);
172MODULE_DESCRIPTION(VBOX_PRODUCT " Network Filter Driver");
173MODULE_LICENSE("GPL");
174#ifdef MODULE_VERSION
175MODULE_VERSION(VBOX_VERSION_STRING " (" RT_XSTR(INTNETTRUNKIFPORT_VERSION) ")");
176#endif
177
178
179#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) && defined(LOG_ENABLED)
180unsigned dev_get_flags(const struct net_device *dev)
181{
182 unsigned flags;
183
184 flags = (dev->flags & ~(IFF_PROMISC |
185 IFF_ALLMULTI |
186 IFF_RUNNING)) |
187 (dev->gflags & (IFF_PROMISC |
188 IFF_ALLMULTI));
189
190 if (netif_running(dev) && netif_carrier_ok(dev))
191 flags |= IFF_RUNNING;
192
193 return flags;
194}
195#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
196
197
198#ifdef VBOXNETFLT_WITH_QDISC
199//#define QDISC_LOG(x) printk x
200# define QDISC_LOG(x) do { } while (0)
201
202# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
203# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops)
204# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
205# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops, parent)
206# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
207# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, queue, ops, parent)
208# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) */
209# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(queue, ops, parent)
210# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) */
211
212# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
213# define qdisc_dev(qdisc) (qdisc->dev)
214# define qdisc_pkt_len(skb) (skb->len)
215# define QDISC_GET(dev) (dev->qdisc_sleeping)
216# else
217# define QDISC_GET(dev) (netdev_get_tx_queue(dev, 0)->qdisc_sleeping)
218# endif
219
220# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
221# define QDISC_SAVED_NUM(dev) 1
222# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
223# define QDISC_SAVED_NUM(dev) dev->num_tx_queues
224# else
225# define QDISC_SAVED_NUM(dev) dev->num_tx_queues+1
226# endif
227
228# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
229# define QDISC_IS_BUSY(dev, qdisc) test_bit(__LINK_STATE_SCHED, &dev->state)
230# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
231# define QDISC_IS_BUSY(dev, qdisc) (test_bit(__QDISC_STATE_RUNNING, &qdisc->state) || \
232 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
233# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */
234# define QDISC_IS_BUSY(dev, qdisc) (qdisc_is_running(qdisc) || \
235 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
236# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */
237
238struct VBoxNetQDiscPriv
239{
240 /** Pointer to the single child qdisc. */
241 struct Qdisc *pChild;
242 /*
243 * Technically it is possible to have different qdiscs for different TX
244 * queues so we have to save them all.
245 */
246 /** Pointer to the array of saved qdiscs. */
247 struct Qdisc **ppSaved;
248 /** Pointer to the net filter instance. */
249 PVBOXNETFLTINS pVBoxNetFlt;
250};
251typedef struct VBoxNetQDiscPriv *PVBOXNETQDISCPRIV;
252
253//#define VBOXNETFLT_QDISC_ENQUEUE
254static int vboxNetFltQdiscEnqueue(struct sk_buff *skb, struct Qdisc *sch)
255{
256 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
257 int rc;
258
259# ifdef VBOXNETFLT_QDISC_ENQUEUE
260 if (VALID_PTR(pPriv->pVBoxNetFlt))
261 {
262 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
263 PCRTNETETHERHDR pEtherHdr;
264 PINTNETTRUNKSWPORT pSwitchPort;
265 uint32_t cbHdrs = skb_headlen(skb);
266
267 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
268 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(skb, 0, cbHdrs, &abHdrBuf[0]);
269 if ( pEtherHdr
270 && (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) != NULL
271 && VALID_PTR(pSwitchPort)
272 && cbHdrs >= 6)
273 {
274 /** @todo consider reference counting, etc. */
275 INTNETSWDECISION enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
276 if (enmDecision == INTNETSWDECISION_INTNET)
277 {
278 struct sk_buff *pBuf = skb_copy(skb, GFP_ATOMIC);
279 pBuf->pkt_type = PACKET_OUTGOING;
280 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
281 qdisc_drop(skb, sch);
282 ++sch->bstats.packets;
283 sch->bstats.bytes += qdisc_pkt_len(skb);
284 return NET_XMIT_SUCCESS;
285 }
286 }
287 }
288# endif /* VBOXNETFLT_QDISC_ENQUEUE */
289 rc = pPriv->pChild->enqueue(skb, pPriv->pChild);
290 if (rc == NET_XMIT_SUCCESS)
291 {
292 ++sch->q.qlen;
293 ++sch->bstats.packets;
294 sch->bstats.bytes += qdisc_pkt_len(skb);
295 }
296 else
297 ++sch->qstats.drops;
298 return rc;
299}
300
301static struct sk_buff *vboxNetFltQdiscDequeue(struct Qdisc *sch)
302{
303 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
304# ifdef VBOXNETFLT_QDISC_ENQUEUE
305 --sch->q.qlen;
306 return pPriv->pChild->dequeue(pPriv->pChild);
307# else /* VBOXNETFLT_QDISC_ENQUEUE */
308 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
309 PCRTNETETHERHDR pEtherHdr;
310 PINTNETTRUNKSWPORT pSwitchPort;
311 struct sk_buff *pSkb;
312
313 QDISC_LOG(("vboxNetFltDequeue: Enter pThis=%p\n", pPriv->pVBoxNetFlt));
314
315 while ((pSkb = pPriv->pChild->dequeue(pPriv->pChild)) != NULL)
316 {
317 struct sk_buff *pBuf;
318 INTNETSWDECISION enmDecision;
319 uint32_t cbHdrs;
320
321 --sch->q.qlen;
322
323 if (!VALID_PTR(pPriv->pVBoxNetFlt))
324 break;
325
326 cbHdrs = skb_headlen(pSkb);
327 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
328 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
329 if ( !pEtherHdr
330 || (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) == NULL
331 || !VALID_PTR(pSwitchPort)
332 || cbHdrs < 6)
333 break;
334
335 /** @todo consider reference counting, etc. */
336 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
337 if (enmDecision != INTNETSWDECISION_INTNET)
338 break;
339
340 pBuf = skb_copy(pSkb, GFP_ATOMIC);
341 pBuf->pkt_type = PACKET_OUTGOING;
342 QDISC_LOG(("vboxNetFltDequeue: pThis=%p\n", pPriv->pVBoxNetFlt));
343 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
344 qdisc_drop(pSkb, sch);
345 QDISC_LOG(("VBoxNetFlt: Packet for %02x:%02x:%02x:%02x:%02x:%02x dropped\n",
346 pSkb->data[0], pSkb->data[1], pSkb->data[2],
347 pSkb->data[3], pSkb->data[4], pSkb->data[5]));
348 }
349
350 return pSkb;
351# endif /* VBOXNETFLT_QDISC_ENQUEUE */
352}
353
354# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
355static int vboxNetFltQdiscRequeue(struct sk_buff *skb, struct Qdisc *sch)
356{
357 int rc;
358 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
359
360 rc = pPriv->pChild->ops->requeue(skb, pPriv->pChild);
361 if (rc == 0)
362 {
363 sch->q.qlen++;
364# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
365 sch->qstats.requeues++;
366# endif
367 }
368
369 return rc;
370}
371# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
372
373static unsigned int vboxNetFltQdiscDrop(struct Qdisc *sch)
374{
375 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
376 unsigned int cbLen;
377
378 if (pPriv->pChild->ops->drop)
379 {
380 cbLen = pPriv->pChild->ops->drop(pPriv->pChild);
381 if (cbLen != 0)
382 {
383 ++sch->qstats.drops;
384 --sch->q.qlen;
385 return cbLen;
386 }
387 }
388
389 return 0;
390}
391
392# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
393static int vboxNetFltQdiscInit(struct Qdisc *sch, struct rtattr *opt)
394# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
395static int vboxNetFltQdiscInit(struct Qdisc *sch, struct nlattr *opt)
396# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
397{
398 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
399 struct net_device *pDev = qdisc_dev(sch);
400
401 pPriv->pVBoxNetFlt = NULL;
402
403 pPriv->ppSaved = kcalloc(QDISC_SAVED_NUM(pDev), sizeof(pPriv->ppSaved[0]),
404 GFP_KERNEL);
405 if (!pPriv->ppSaved)
406 return -ENOMEM;
407
408 pPriv->pChild = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
409 &pfifo_qdisc_ops,
410 TC_H_MAKE(TC_H_MAJ(sch->handle),
411 TC_H_MIN(1)));
412 if (!pPriv->pChild)
413 {
414 kfree(pPriv->ppSaved);
415 pPriv->ppSaved = NULL;
416 return -ENOMEM;
417 }
418
419 return 0;
420}
421
422static void vboxNetFltQdiscReset(struct Qdisc *sch)
423{
424 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
425
426 qdisc_reset(pPriv->pChild);
427 sch->q.qlen = 0;
428 sch->qstats.backlog = 0;
429}
430
431static void vboxNetFltQdiscDestroy(struct Qdisc* sch)
432{
433 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
434 struct net_device *pDev = qdisc_dev(sch);
435
436 qdisc_destroy(pPriv->pChild);
437 pPriv->pChild = NULL;
438
439 if (pPriv->ppSaved)
440 {
441 int i;
442 for (i = 0; i < QDISC_SAVED_NUM(pDev); i++)
443 if (pPriv->ppSaved[i])
444 qdisc_destroy(pPriv->ppSaved[i]);
445 kfree(pPriv->ppSaved);
446 pPriv->ppSaved = NULL;
447 }
448}
449
450static int vboxNetFltClassGraft(struct Qdisc *sch, unsigned long arg, struct Qdisc *pNew,
451 struct Qdisc **ppOld)
452{
453 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
454
455 if (pNew == NULL)
456 pNew = &noop_qdisc;
457
458 sch_tree_lock(sch);
459 *ppOld = pPriv->pChild;
460 pPriv->pChild = pNew;
461# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
462 sch->q.qlen = 0;
463# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
464 qdisc_tree_decrease_qlen(*ppOld, (*ppOld)->q.qlen);
465# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
466 qdisc_reset(*ppOld);
467 sch_tree_unlock(sch);
468
469 return 0;
470}
471
472static struct Qdisc *vboxNetFltClassLeaf(struct Qdisc *sch, unsigned long arg)
473{
474 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
475 return pPriv->pChild;
476}
477
478static unsigned long vboxNetFltClassGet(struct Qdisc *sch, u32 classid)
479{
480 return 1;
481}
482
483static void vboxNetFltClassPut(struct Qdisc *sch, unsigned long arg)
484{
485}
486
487# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
488static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
489 struct rtattr **tca, unsigned long *arg)
490# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
491static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
492 struct nlattr **tca, unsigned long *arg)
493# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
494{
495 return -ENOSYS;
496}
497
498static int vboxNetFltClassDelete(struct Qdisc *sch, unsigned long arg)
499{
500 return -ENOSYS;
501}
502
503static void vboxNetFltClassWalk(struct Qdisc *sch, struct qdisc_walker *walker)
504{
505 if (!walker->stop) {
506 if (walker->count >= walker->skip)
507 if (walker->fn(sch, 1, walker) < 0) {
508 walker->stop = 1;
509 return;
510 }
511 walker->count++;
512 }
513}
514
515# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
516static struct tcf_proto **vboxNetFltClassFindTcf(struct Qdisc *sch, unsigned long cl)
517{
518 return NULL;
519}
520# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) */
521
522static int vboxNetFltClassDump(struct Qdisc *sch, unsigned long cl,
523 struct sk_buff *skb, struct tcmsg *tcm)
524{
525 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
526
527 if (cl != 1)
528 return -ENOENT;
529
530 tcm->tcm_handle |= TC_H_MIN(1);
531 tcm->tcm_info = pPriv->pChild->handle;
532
533 return 0;
534}
535
536
537static struct Qdisc_class_ops g_VBoxNetFltClassOps =
538{
539 .graft = vboxNetFltClassGraft,
540 .leaf = vboxNetFltClassLeaf,
541 .get = vboxNetFltClassGet,
542 .put = vboxNetFltClassPut,
543 .change = vboxNetFltClassChange,
544 .delete = vboxNetFltClassDelete,
545 .walk = vboxNetFltClassWalk,
546# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
547 .tcf_chain = vboxNetFltClassFindTcf,
548# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) */
549 .dump = vboxNetFltClassDump,
550};
551
552
553static struct Qdisc_ops g_VBoxNetFltQDiscOps = {
554 .cl_ops = &g_VBoxNetFltClassOps,
555 .id = "vboxnetflt",
556 .priv_size = sizeof(struct VBoxNetQDiscPriv),
557 .enqueue = vboxNetFltQdiscEnqueue,
558 .dequeue = vboxNetFltQdiscDequeue,
559# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
560 .requeue = vboxNetFltQdiscRequeue,
561# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
562 .peek = qdisc_peek_dequeued,
563# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
564 .drop = vboxNetFltQdiscDrop,
565 .init = vboxNetFltQdiscInit,
566 .reset = vboxNetFltQdiscReset,
567 .destroy = vboxNetFltQdiscDestroy,
568 .owner = THIS_MODULE
569};
570
571/*
572 * If our qdisc is already attached to the device (that means the user
573 * installed it from command line with 'tc' command) we simply update
574 * the pointer to vboxnetflt instance in qdisc's private structure.
575 * Otherwise we need to take some additional steps:
576 * - Create our qdisc;
577 * - Save all references to qdiscs;
578 * - Replace our child with the first qdisc reference;
579 * - Replace all references so they point to our qdisc.
580 */
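/*
 * Editorial note: the "already attached" case below corresponds to the qdisc
 * having been installed manually from user space, e.g. with something along
 * the lines of
 *
 *     tc qdisc add dev eth0 root vboxnetflt
 *
 * (the interface name is only an example); register_qdisc() in the module
 * init code is what makes the "vboxnetflt" id known to tc.
 */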
581static void vboxNetFltLinuxQdiscInstall(PVBOXNETFLTINS pThis, struct net_device *pDev)
582{
583# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
584 int i;
585# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
586 PVBOXNETQDISCPRIV pPriv;
587
588 struct Qdisc *pExisting = QDISC_GET(pDev);
589 /* Do not install our qdisc for devices with no TX queues */
590 if (!pExisting->enqueue)
591 return;
592 if (strcmp(pExisting->ops->id, "vboxnetflt"))
593 {
594 /* The existing qdisc is different from ours, let's create a new one. */
595 struct Qdisc *pNew = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
596 &g_VBoxNetFltQDiscOps, TC_H_ROOT);
597 if (!pNew)
598 return; // TODO: Error?
599
600 if (!try_module_get(THIS_MODULE))
601 {
602 /*
603 * This may cause a memory leak but calling qdisc_destroy()
604 * is not an option as it will call module_put().
605 */
606 return;
607 }
608 pPriv = qdisc_priv(pNew);
609
610 qdisc_destroy(pPriv->pChild);
611 pPriv->pChild = QDISC_GET(pDev);
612 atomic_inc(&pPriv->pChild->refcnt);
613 /*
614 * There is no need to deactivate the device or acquire any locks
615 * prior to changing qdiscs since we do not destroy the old qdisc.
616 * Atomic replacement of pointers is enough.
617 */
618 /*
619 * No need to change reference counters here as we merely move
620 * the pointer and the reference counter of the newly allocated
621 * qdisc is already 1.
622 */
623# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
624 pPriv->ppSaved[0] = pDev->qdisc_sleeping;
625 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pNew);
626 ASMAtomicWritePtr(&pDev->qdisc, pNew);
627# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
628 for (i = 0; i < pDev->num_tx_queues; i++)
629 {
630 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
631
632 pPriv->ppSaved[i] = pQueue->qdisc_sleeping;
633 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pNew);
634 ASMAtomicWritePtr(&pQueue->qdisc, pNew);
635 if (i)
636 atomic_inc(&pNew->refcnt);
637 }
638 /* Newer kernels store root qdisc in netdev structure as well. */
639# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
640 pPriv->ppSaved[pDev->num_tx_queues] = pDev->qdisc;
641 ASMAtomicWritePtr(&pDev->qdisc, pNew);
642 atomic_inc(&pNew->refcnt);
643# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
644# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
645 /* Sync the queue len with our child */
646 pNew->q.qlen = pPriv->pChild->q.qlen;
647 }
648 else
649 {
650 /* We already have vboxnetflt qdisc, let's use it. */
651 pPriv = qdisc_priv(pExisting);
652 }
653 ASMAtomicWritePtr(&pPriv->pVBoxNetFlt, pThis);
654 QDISC_LOG(("vboxNetFltLinuxInstallQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
655}
656
657static void vboxNetFltLinuxQdiscRemove(PVBOXNETFLTINS pThis, struct net_device *pDev)
658{
659# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
660 int i;
661# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
662 PVBOXNETQDISCPRIV pPriv;
663 struct Qdisc *pQdisc, *pChild;
664 if (!pDev)
665 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
666 if (!VALID_PTR(pDev))
667 {
668 printk("VBoxNetFlt: Failed to detach qdisc, invalid device pointer: %p\n",
669 pDev);
670 return; // TODO: Consider returning an error
671 }
672
673
674 pQdisc = QDISC_GET(pDev);
675 if (strcmp(pQdisc->ops->id, "vboxnetflt"))
676 {
677 if (pQdisc->enqueue)
678 {
679 /* Looks like the user has replaced our qdisc manually. */
680 printk("VBoxNetFlt: Failed to detach qdisc, wrong qdisc: %s\n",
681 pQdisc->ops->id);
682 }
683 return; // TODO: Consider returning an error
684 }
685
686 pPriv = qdisc_priv(pQdisc);
687 Assert(pPriv->pVBoxNetFlt == pThis);
688 ASMAtomicWriteNullPtr(&pPriv->pVBoxNetFlt);
689 pChild = ASMAtomicXchgPtrT(&pPriv->pChild, &noop_qdisc, struct Qdisc *);
690 qdisc_destroy(pChild); /* It won't be the last reference. */
691
692 QDISC_LOG(("vboxNetFltLinuxQdiscRemove: refcnt=%d num_tx_queues=%d\n",
693 atomic_read(&pQdisc->refcnt), pDev->num_tx_queues));
694# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
695 /* Play it safe, make sure the qdisc is not being used. */
696 if (pPriv->ppSaved[0])
697 {
698 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pPriv->ppSaved[0]);
699 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[0]);
700 pPriv->ppSaved[0] = NULL;
701 while (QDISC_IS_BUSY(pDev, pQdisc))
702 yield();
703 qdisc_destroy(pQdisc); /* Destroy reference */
704 }
705# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
706 for (i = 0; i < pDev->num_tx_queues; i++)
707 {
708 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
709 if (pPriv->ppSaved[i])
710 {
711 Assert(pQueue->qdisc_sleeping == pQdisc);
712 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pPriv->ppSaved[i]);
713 ASMAtomicWritePtr(&pQueue->qdisc, pPriv->ppSaved[i]);
714 pPriv->ppSaved[i] = NULL;
715 while (QDISC_IS_BUSY(pDev, pQdisc))
716 yield();
717 qdisc_destroy(pQdisc); /* Destroy reference */
718 }
719 }
720 /* Newer kernels store root qdisc in netdev structure as well. */
721# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
722 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[pDev->num_tx_queues]);
723 pPriv->ppSaved[pDev->num_tx_queues] = NULL;
724 while (QDISC_IS_BUSY(pDev, pQdisc))
725 yield();
726 qdisc_destroy(pQdisc); /* Destroy reference */
727# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
728# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
729
730 /*
731 * At this point all references to our qdisc should be gone
732 * unless the user had installed it manually.
733 */
734 QDISC_LOG(("vboxNetFltLinuxRemoveQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
735}
736
737#endif /* VBOXNETFLT_WITH_QDISC */
738
739
740/**
741 * Initialize module.
742 *
743 * @returns appropriate status code.
744 */
745static int __init VBoxNetFltLinuxInit(void)
746{
747 int rc;
748 /*
749 * Initialize IPRT.
750 */
751 rc = RTR0Init(0);
752 if (RT_SUCCESS(rc))
753 {
754 Log(("VBoxNetFltLinuxInit\n"));
755
756 /*
757 * Initialize the globals and connect to the support driver.
758 *
759 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
760 * for establishing the connection to the support driver.
761 */
762 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
763 rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
764 if (RT_SUCCESS(rc))
765 {
766#ifdef VBOXNETFLT_WITH_QDISC
767 /*memcpy(&g_VBoxNetFltQDiscOps, &pfifo_qdisc_ops, sizeof(g_VBoxNetFltQDiscOps));
768 strcpy(g_VBoxNetFltQDiscOps.id, "vboxnetflt");
769 g_VBoxNetFltQDiscOps.owner = THIS_MODULE;*/
770 rc = register_qdisc(&g_VBoxNetFltQDiscOps);
771 if (rc)
772 {
773 LogRel(("VBoxNetFlt: Failed to register qdisc: %d\n", rc));
774 return rc;
775 }
776#endif /* VBOXNETFLT_WITH_QDISC */
777 LogRel(("VBoxNetFlt: Successfully started.\n"));
778 return 0;
779 }
780
781 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
782 RTR0Term();
783 }
784 else
785 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
786
787 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
788 return -RTErrConvertToErrno(rc);
789}
790
791
792/**
793 * Unload the module.
794 *
795 * @todo We have to prevent this if we're busy!
796 */
797static void __exit VBoxNetFltLinuxUnload(void)
798{
799 int rc;
800 Log(("VBoxNetFltLinuxUnload\n"));
801 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
802
803#ifdef VBOXNETFLT_WITH_QDISC
804 unregister_qdisc(&g_VBoxNetFltQDiscOps);
805#endif /* VBOXNETFLT_WITH_QDISC */
806 /*
807 * Undo the work done during start (in reverse order).
808 */
809 rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
810 AssertRC(rc); NOREF(rc);
811
812 RTR0Term();
813
814 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
815
816 Log(("VBoxNetFltLinuxUnload - done\n"));
817}
818
819
820/**
821 * Experiment where we filter traffic from the host to the internal network
822 * before it reaches the NIC driver.
823 *
824 * The current code uses a very ugly hack and only works on kernels using the
825 * net_device_ops (>= 2.6.29). It has been shown to give us a
826 * performance boost of 60-100% though. So, we have to find some less hacky way
827 * of getting this job done eventually.
828 *
829 * #define VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
830 */
831#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
832
833# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
834
835# include <linux/ethtool.h>
836
837typedef struct ethtool_ops OVR_OPSTYPE;
838# define OVR_OPS ethtool_ops
839# define OVR_XMIT pfnStartXmit
840
841# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
842
843typedef struct net_device_ops OVR_OPSTYPE;
844# define OVR_OPS netdev_ops
845# define OVR_XMIT pOrgOps->ndo_start_xmit
846
847# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
848
849/**
850 * The overridden net_device_ops of the device we're attached to.
851 *
852 * As there is no net_device_ops structure in pre-2.6.29 kernels we override
853 * ethtool_ops instead, along with the hard_start_xmit callback in the
854 * net_device structure.
855 *
856 * This is a very dirty hack that was created to explore how much we can improve
857 * the host-to-guest transfers by not CC'ing the NIC. It turns out to be
858 * the only way to filter outgoing packets for devices without a TX queue.
859 */
860typedef struct VBoxNetDeviceOpsOverride
861{
862 /** Our overridden ops. */
863 OVR_OPSTYPE Ops;
864 /** Magic word. */
865 uint32_t u32Magic;
866 /** Pointer to the original ops. */
867 OVR_OPSTYPE const *pOrgOps;
868# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
869 /** Pointer to the original hard_start_xmit function. */
870 int (*pfnStartXmit)(struct sk_buff *pSkb, struct net_device *pDev);
871# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
872 /** Pointer to the net filter instance. */
873 PVBOXNETFLTINS pVBoxNetFlt;
874 /** The number of filtered packets. */
875 uint64_t cFiltered;
876 /** The total number of packets */
877 uint64_t cTotal;
878} VBOXNETDEVICEOPSOVERRIDE, *PVBOXNETDEVICEOPSOVERRIDE;
879/** VBOXNETDEVICEOPSOVERRIDE::u32Magic value. */
880#define VBOXNETDEVICEOPSOVERRIDE_MAGIC UINT32_C(0x00c0ffee)
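/*
 * Editorial note on the layout above: the Ops member must remain the first
 * field.  vboxNetFltLinuxHookDev() stores a pointer to the whole override
 * structure in pDev->OVR_OPS, so the kernel keeps dereferencing a valid ops
 * table while vboxNetFltLinuxStartXmitFilter() can cast that same pointer
 * back to PVBOXNETDEVICEOPSOVERRIDE to reach the extra fields behind it.
 */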
881
882/**
883 * ndo_start_xmit wrapper that drops packets that shouldn't go to the wire
884 * because they belong on the internal network.
885 *
886 * @returns NETDEV_TX_XXX.
887 * @param pSkb The socket buffer to transmit.
888 * @param pDev The net device.
889 */
890static int vboxNetFltLinuxStartXmitFilter(struct sk_buff *pSkb, struct net_device *pDev)
891{
892 PVBOXNETDEVICEOPSOVERRIDE pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->OVR_OPS;
893 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
894 PCRTNETETHERHDR pEtherHdr;
895 PINTNETTRUNKSWPORT pSwitchPort;
896 uint32_t cbHdrs;
897
898
899 /*
900 * Validate the override structure.
901 *
902 * Note! We're racing vboxNetFltLinuxUnhookDev here. If this was supposed
903 * to be production quality code, we would have to be much more
904 * careful here and avoid the race.
905 */
906 if ( !VALID_PTR(pOverride)
907 || pOverride->u32Magic != VBOXNETDEVICEOPSOVERRIDE_MAGIC
908# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
909 || !VALID_PTR(pOverride->pOrgOps)
910# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
911 )
912 {
913 printk("vboxNetFltLinuxStartXmitFilter: bad override %p\n", pOverride);
914 dev_kfree_skb(pSkb);
915 return NETDEV_TX_OK;
916 }
917 pOverride->cTotal++;
918
919 /*
920 * Do the filtering based on the default OUI of our virtual NICs.
921 *
922 * Note! In a real solution, we would ask the switch whether the
923 * destination MAC is certain to be on the internal network and only
924 * then drop it.
925 */
926 cbHdrs = skb_headlen(pSkb);
927 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
928 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
929 if ( pEtherHdr
930 && VALID_PTR(pOverride->pVBoxNetFlt)
931 && (pSwitchPort = pOverride->pVBoxNetFlt->pSwitchPort) != NULL
932 && VALID_PTR(pSwitchPort)
933 && cbHdrs >= 6)
934 {
935 INTNETSWDECISION enmDecision;
936
937 /** @todo consider reference counting, etc. */
938 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
939 if (enmDecision == INTNETSWDECISION_INTNET)
940 {
941 dev_kfree_skb(pSkb);
942 pOverride->cFiltered++;
943 return NETDEV_TX_OK;
944 }
945 }
946
947 return pOverride->OVR_XMIT(pSkb, pDev);
948}
949
950/**
951 * Hooks the device ndo_start_xmit operation of the device.
952 *
953 * @param pThis The net filter instance.
954 * @param pDev The net device.
955 */
956static void vboxNetFltLinuxHookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
957{
958 PVBOXNETDEVICEOPSOVERRIDE pOverride;
959 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
960
961 pOverride = RTMemAlloc(sizeof(*pOverride));
962 if (!pOverride)
963 return;
964 pOverride->pOrgOps = pDev->OVR_OPS;
965 /* We only need to save the ethtool_ops structure if it is present (#5712). */
966 if (VALID_PTR(pDev->OVR_OPS))
967 pOverride->Ops = *pDev->OVR_OPS;
968# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
969 pOverride->pfnStartXmit = pDev->hard_start_xmit;
970# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
971 pOverride->Ops.ndo_start_xmit = vboxNetFltLinuxStartXmitFilter;
972# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
973 pOverride->u32Magic = VBOXNETDEVICEOPSOVERRIDE_MAGIC;
974 pOverride->cTotal = 0;
975 pOverride->cFiltered = 0;
976 pOverride->pVBoxNetFlt = pThis;
977
978 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp); /* (this isn't necessary, but so what) */
979 ASMAtomicWritePtr((void * volatile *)&pDev->OVR_OPS, pOverride);
980# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
981 ASMAtomicXchgPtr((void * volatile *)&pDev->hard_start_xmit, vboxNetFltLinuxStartXmitFilter);
982# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
983 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
984}
985
986/**
987 * Undos what vboxNetFltLinuxHookDev did.
988 *
989 * @param pThis The net filter instance.
990 * @param pDev The net device. Can be NULL, in which case
991 * we'll try retrieve it from @a pThis.
992 */
993static void vboxNetFltLinuxUnhookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
994{
995 PVBOXNETDEVICEOPSOVERRIDE pOverride;
996 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
997
998 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
999 if (!pDev)
1000 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1001 if (VALID_PTR(pDev))
1002 {
1003 pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->OVR_OPS;
1004 if ( VALID_PTR(pOverride)
1005 && pOverride->u32Magic == VBOXNETDEVICEOPSOVERRIDE_MAGIC
1006 && VALID_PTR(pOverride->pOrgOps)
1007 )
1008 {
1009# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
1010 ASMAtomicWritePtr((void * volatile *)&pDev->hard_start_xmit, pOverride->pfnStartXmit);
1011# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
1012 ASMAtomicWritePtr((void const * volatile *)&pDev->OVR_OPS, pOverride->pOrgOps);
1013 ASMAtomicWriteU32(&pOverride->u32Magic, 0);
1014 }
1015 else
1016 pOverride = NULL;
1017 }
1018 else
1019 pOverride = NULL;
1020 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1021
1022 if (pOverride)
1023 {
1024 printk("vboxnetflt: dropped %llu out of %llu packets\n", pOverride->cFiltered, pOverride->cTotal);
1025 RTMemFree(pOverride);
1026 }
1027}
1028
1029#endif /* VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT */
1030
1031
1032/**
1033 * Reads and retains the host interface handle.
1034 *
1035 * @returns The handle, NULL if detached.
1036 * @param pThis The net filter instance.
1037 */
1038DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
1039{
1040#if 0
1041 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1042 struct net_device *pDev = NULL;
1043
1044 Log(("vboxNetFltLinuxRetainNetDev\n"));
1045 /*
1046 * Be careful here to avoid problems racing the detached callback.
1047 */
1048 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1049 if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
1050 {
1051 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
1052 if (pDev)
1053 {
1054 dev_hold(pDev);
1055 Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n",
1056 pDev, pDev->name,
1057#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
1058 netdev_refcnt_read(pDev)
1059#else
1060 atomic_read(&pDev->refcnt)
1061#endif
1062 ));
1063 }
1064 }
1065 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1066
1067 Log(("vboxNetFltLinuxRetainNetDev - done\n"));
1068 return pDev;
1069#else
1070 return ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1071#endif
1072}
1073
1074
1075/**
1076 * Release the host interface handle previously retained
1077 * by vboxNetFltLinuxRetainNetDev.
1078 *
1079 * @param pThis The instance.
1080 * @param pDev The vboxNetFltLinuxRetainNetDev
1081 * return value, NULL is fine.
1082 */
1083DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
1084{
1085#if 0
1086 Log(("vboxNetFltLinuxReleaseNetDev\n"));
1087 NOREF(pThis);
1088 if (pDev)
1089 {
1090 dev_put(pDev);
1091 Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n",
1092 pDev, pDev->name,
1093#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
1094 netdev_refcnt_read(pDev)
1095#else
1096 atomic_read(&pDev->refcnt)
1097#endif
1098 ));
1099 }
1100 Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
1101#endif
1102}
1103
1104#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
1105#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))
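/*
 * Editorial note: the tag lives in the last four bytes of the sk_buff control
 * buffer (cb).  vboxNetFltLinuxSkBufFromSG() stamps it on every buffer we
 * inject ourselves, and the packet handler uses vboxNetFltLinuxSkBufIsOur()
 * to drop such buffers instead of looping them back into the internal
 * network.  This relies on nothing overwriting that part of cb between our
 * transmit and the protocol handler.
 */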
1106
1107/**
1108 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
1109 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
1110 *
1111 * @returns true / false accordingly.
1112 * @param pBuf The sk_buff.
1113 */
1114DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
1115{
1116 return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
1117}
1118
1119
1120/**
1121 * Internal worker that creates a Linux sk_buff for a
1122 * (scatter/)gather list.
1123 *
1124 * @returns Pointer to the sk_buff.
1125 * @param pThis The instance.
1126 * @param pSG The (scatter/)gather list.
1127 * @param fDstWire Set if the destination is the wire.
1128 */
1129static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
1130{
1131 struct sk_buff *pPkt;
1132 struct net_device *pDev;
1133 unsigned fGsoType = 0;
1134
1135 if (pSG->cbTotal == 0)
1136 {
1137 LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
1138 return NULL;
1139 }
1140
1141 /** @todo We should use fragments to map the SG buffers for large packets.
1142 * 256 bytes seems to be a threshold used a lot for this. It
1143 * requires some nasty work on the intnet side though... */
1144 /*
1145 * Allocate a packet and copy over the data.
1146 */
1147 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1148 pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
1149 if (RT_UNLIKELY(!pPkt))
1150 {
1151 Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
1152 pSG->pvUserData = NULL;
1153 return NULL;
1154 }
1155 pPkt->dev = pDev;
1156 pPkt->ip_summed = CHECKSUM_NONE;
1157
1158 /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
1159 skb_reserve(pPkt, NET_IP_ALIGN);
1160
1161 /* Copy the segments. */
1162 skb_put(pPkt, pSG->cbTotal);
1163 IntNetSgRead(pSG, pPkt->data);
1164
1165#if defined(VBOXNETFLT_WITH_GSO_XMIT_WIRE) || defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
1166 /*
1167 * Setup GSO if used by this packet.
1168 */
1169 switch ((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type)
1170 {
1171 default:
1172 AssertMsgFailed(("%u (%s)\n", pSG->GsoCtx.u8Type, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type) ));
1173 /* fall thru */
1174 case PDMNETWORKGSOTYPE_INVALID:
1175 fGsoType = 0;
1176 break;
1177 case PDMNETWORKGSOTYPE_IPV4_TCP:
1178 fGsoType = SKB_GSO_TCPV4;
1179 break;
1180 case PDMNETWORKGSOTYPE_IPV4_UDP:
1181 fGsoType = SKB_GSO_UDP;
1182 break;
1183 case PDMNETWORKGSOTYPE_IPV6_TCP:
1184 fGsoType = SKB_GSO_TCPV6;
1185 break;
1186 }
1187 if (fGsoType)
1188 {
1189 struct skb_shared_info *pShInfo = skb_shinfo(pPkt);
1190
1191 pShInfo->gso_type = fGsoType | SKB_GSO_DODGY;
1192 pShInfo->gso_size = pSG->GsoCtx.cbMaxSeg;
1193 pShInfo->gso_segs = PDMNetGsoCalcSegmentCount(&pSG->GsoCtx, pSG->cbTotal);
1194
1195 /*
1196 * We need to set checksum fields even if the packet goes to the host
1197 * directly, as it may be immediately forwarded by the IP layer (@bugref{5020}).
1198 */
1199 Assert(skb_headlen(pPkt) >= pSG->GsoCtx.cbHdrs);
1200 pPkt->ip_summed = CHECKSUM_PARTIAL;
1201# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1202 pPkt->csum_start = skb_headroom(pPkt) + pSG->GsoCtx.offHdr2;
1203 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1204 pPkt->csum_offset = RT_OFFSETOF(RTNETTCP, th_sum);
1205 else
1206 pPkt->csum_offset = RT_OFFSETOF(RTNETUDP, uh_sum);
1207# else
1208 pPkt->h.raw = pPkt->data + pSG->GsoCtx.offHdr2;
1209 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1210 pPkt->csum = RT_OFFSETOF(RTNETTCP, th_sum);
1211 else
1212 pPkt->csum = RT_OFFSETOF(RTNETUDP, uh_sum);
1213# endif
1214 if (!fDstWire)
1215 PDMNetGsoPrepForDirectUse(&pSG->GsoCtx, pPkt->data, pSG->cbTotal, PDMNETCSUMTYPE_PSEUDO);
1216 }
1217#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE || VBOXNETFLT_WITH_GSO_XMIT_HOST */
1218
1219 /*
1220 * Finish up the socket buffer.
1221 */
1222 pPkt->protocol = eth_type_trans(pPkt, pDev);
1223 if (fDstWire)
1224 {
1225 VBOX_SKB_RESET_NETWORK_HDR(pPkt);
1226
1227 /* Restore ethernet header back. */
1228 skb_push(pPkt, ETH_HLEN); /** @todo VLAN: +4 if VLAN? */
1229 VBOX_SKB_RESET_MAC_HDR(pPkt);
1230 }
1231 VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
1232
1233 return pPkt;
1234}
1235
1236
1237/**
1238 * Initializes a SG list from an sk_buff.
1239 *
1240 *
1241 * @param pThis The instance.
1242 * @param pBuf The sk_buff.
1243 * @param pSG The SG.
1244 * @param cSegs The number of segments allocated for the SG.
1245 * This should match the number in the sk_buff exactly!
1246 * Callers compute it with vboxNetFltLinuxCalcSGSegments().
1247 * @param fSrc The source of the frame.
1248 * @param pGsoCtx Pointer to the GSO context if it's a GSO
1249 * internal network frame. NULL if regular frame.
1250 */
1251DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG,
1252 unsigned cSegs, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1253{
1254 int i;
1255 NOREF(pThis);
1256
1257 Assert(!skb_shinfo(pBuf)->frag_list);
1258
1259 if (!pGsoCtx)
1260 IntNetSgInitTempSegs(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/);
1261 else
1262 IntNetSgInitTempSegsGso(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/, pGsoCtx);
1263
1264#ifdef VBOXNETFLT_SG_SUPPORT
1265 pSG->aSegs[0].cb = skb_headlen(pBuf);
1266 pSG->aSegs[0].pv = pBuf->data;
1267 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1268
1269 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
1270 {
1271 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
1272 pSG->aSegs[i+1].cb = pFrag->size;
1273 pSG->aSegs[i+1].pv = kmap(pFrag->page);
1274 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
1275 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
1276 }
1277 ++i;
1278
1279#else
1280 pSG->aSegs[0].cb = pBuf->len;
1281 pSG->aSegs[0].pv = pBuf->data;
1282 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1283 i = 1;
1284#endif
1285
1286 pSG->cSegsUsed = i;
1287
1288#ifdef PADD_RUNT_FRAMES_FROM_HOST
1289 /*
1290 * Add a trailer if the frame is too small.
1291 *
1292 * Since we're getting to the packet before it is framed, it has not
1293 * yet been padded. The current solution is to add a segment pointing
1294 * to a buffer containing all zeros and pray that works for all frames...
1295 */
1296 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
1297 {
1298 static uint8_t const s_abZero[128] = {0};
1299
1300 AssertReturnVoid(i < cSegs);
1301
1302 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
1303 pSG->aSegs[i].pv = (void *)&s_abZero[0];
1304 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
1305 pSG->cbTotal = 60;
1306 pSG->cSegsUsed++;
1307 Assert(i + 1 <= pSG->cSegsAlloc);
1308 }
1309#endif
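/*
 * Editorial example: a 42 byte ARP request coming from the host would get an
 * 18 byte all-zero trailer segment appended by the code above, so the frame
 * handed to the internal network meets the 60 byte minimum (excluding FCS)
 * that real hardware would have padded it to on the wire.
 */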
1310
1311 Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
1312 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
1313 for (i = 0; i < pSG->cSegsUsed; i++)
1314 Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
1315 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
1316}
1317
1318/**
1319 * Packet handler: called by the kernel for frames seen on the interface
1320 * we are attached to.
1321 *
1322 * @returns 0 (the return value is ignored by the kernel).
1323 * @param pBuf The socket buffer.
1324 * @param pSkbDev The device the buffer was received on.
1325 * @param pPacketType Our packet_type structure, used to locate the instance.
1326 * @param pOrigDev The original device (2.6.14 and later only).
1327 */
1328#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
1329static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1330 struct net_device *pSkbDev,
1331 struct packet_type *pPacketType,
1332 struct net_device *pOrigDev)
1333#else
1334static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1335 struct net_device *pSkbDev,
1336 struct packet_type *pPacketType)
1337#endif
1338{
1339 PVBOXNETFLTINS pThis;
1340 struct net_device *pDev;
1341 LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
1342 pBuf, pSkbDev, pPacketType));
1343#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1344 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1345 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1346# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1347 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1348# endif
1349#else
1350 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1351 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1352#endif
1353 /*
1354 * Drop it immediately?
1355 */
1356 if (!pBuf)
1357 return 0;
1358
1359 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
1360 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1361 if (pDev != pSkbDev)
1362 {
1363 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
1364 return 0;
1365 }
1366
1367 Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
1368 if (vboxNetFltLinuxSkBufIsOur(pBuf))
1369 {
1370 Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
1371 dev_kfree_skb(pBuf);
1372 return 0;
1373 }
1374
1375#ifndef VBOXNETFLT_SG_SUPPORT
1376 {
1377 /*
1378 * Get rid of fragmented packets, they cause too much trouble.
1379 */
1380 struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
1381 kfree_skb(pBuf);
1382 if (!pCopy)
1383 {
1384 LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
1385 return 0;
1386 }
1387 pBuf = pCopy;
1388# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1389 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1390 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1391# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1392 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1393# endif
1394# else
1395 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1396 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1397# endif
1398 }
1399#endif
1400
1401#ifdef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1402 /* Forward it to the internal network. */
1403 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1404#else
1405 /* Add the packet to transmit queue and schedule the bottom half. */
1406 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
1407 schedule_work(&pThis->u.s.XmitTask);
1408 Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
1409 &pThis->u.s.XmitTask, pBuf));
1410#endif
1411
1412 /* It does not really matter what we return, it is ignored by the kernel. */
1413 return 0;
1414}
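/*
 * Editorial sketch (assumption, the registration is not part of this hunk):
 * the handler above is the func member of a struct packet_type, hooked up
 * elsewhere in this file roughly like
 *
 *     pThis->u.s.PacketType.type = __constant_htons(ETH_P_ALL);
 *     pThis->u.s.PacketType.dev  = pDev;
 *     pThis->u.s.PacketType.func = vboxNetFltLinuxPacketHandler;
 *     dev_add_pack(&pThis->u.s.PacketType);
 *
 * which is also what lets VBOX_FLT_PT_TO_INST() recover the instance from
 * the packet_type pointer the kernel passes back.
 */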
1415
1416/**
1417 * Calculate the number of INTNETSEG segments the socket buffer will need.
1418 *
1419 * @returns Segment count.
1420 * @param pBuf The socket buffer.
1421 */
1422DECLINLINE(unsigned) vboxNetFltLinuxCalcSGSegments(struct sk_buff *pBuf)
1423{
1424#ifdef VBOXNETFLT_SG_SUPPORT
1425 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
1426#else
1427 unsigned cSegs = 1;
1428#endif
1429#ifdef PADD_RUNT_FRAMES_FROM_HOST
1430 /* vboxNetFltLinuxSkBufToSG adds a padding segment if it's a runt. */
1431 if (pBuf->len < 60)
1432 cSegs++;
1433#endif
1434 return cSegs;
1435}
1436
1437/**
1438 * Destroy the intnet scatter / gather buffer created by
1439 * vboxNetFltLinuxSkBufToSG.
1440 */
1441static void vboxNetFltLinuxDestroySG(PINTNETSG pSG)
1442{
1443#ifdef VBOXNETFLT_SG_SUPPORT
1444 int i;
1445
1446 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
1447 {
1448 printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
1449 kunmap(pSG->aSegs[i+1].pv);
1450 }
1451#endif
1452 NOREF(pSG);
1453}
1454
1455#ifdef LOG_ENABLED
1456/**
1457 * Logging helper.
1458 */
1459static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
1460{
1461 uint8_t *pInt, *pExt;
1462 static int iPacketNo = 1;
1463 iPacketNo += iIncrement;
1464 if (fEgress)
1465 {
1466 pExt = pSG->aSegs[0].pv;
1467 pInt = pExt + 6;
1468 }
1469 else
1470 {
1471 pInt = pSG->aSegs[0].pv;
1472 pExt = pInt + 6;
1473 }
1474 Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
1475 " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
1476 pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
1477 fEgress ? "-->" : "<--", pszWhere,
1478 pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
1479 pSG->cbTotal, iPacketNo));
1480 Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
1481}
1482#else
1483# define vboxNetFltDumpPacket(a, b, c, d) do {} while (0)
1484#endif
1485
1486#ifdef VBOXNETFLT_WITH_GSO_RECV
1487
1488/**
1489 * Worker for vboxNetFltLinuxForwardToIntNet that checks whether we can forward a
1490 * GSO socket buffer without having to segment it.
1491 *
1492 * @returns true on success, false if needs segmenting.
1493 * @param pThis The net filter instance.
1494 * @param pSkb The GSO socket buffer.
1495 * @param fSrc The source.
1496 * @param pGsoCtx Where to return the GSO context on success.
1497 */
1498static bool vboxNetFltLinuxCanForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc,
1499 PPDMNETWORKGSO pGsoCtx)
1500{
1501 PDMNETWORKGSOTYPE enmGsoType;
1502 uint16_t uEtherType;
1503 unsigned int cbTransport;
1504 unsigned int offTransport;
1505 unsigned int cbTransportHdr;
1506 unsigned uProtocol;
1507 union
1508 {
1509 RTNETIPV4 IPv4;
1510 RTNETIPV6 IPv6;
1511 RTNETTCP Tcp;
1512 uint8_t ab[40];
1513 uint16_t au16[40/2];
1514 uint32_t au32[40/4];
1515 } Buf;
1516
1517 /*
1518 * Check the GSO properties of the socket buffer and make sure it fits.
1519 */
1520 /** @todo Figure out how to handle SKB_GSO_TCP_ECN! */
1521 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCPV6 | SKB_GSO_TCPV4) ))
1522 {
1523 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_type=%#x\n", skb_shinfo(pSkb)->gso_type));
1524 return false;
1525 }
1526 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_size < 1
1527 || pSkb->len > VBOX_MAX_GSO_SIZE ))
1528 {
1529 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_size=%#x skb_len=%#x (max=%#x)\n", skb_shinfo(pSkb)->gso_size, pSkb->len, VBOX_MAX_GSO_SIZE));
1530 return false;
1531 }
1532 /*
1533 * It is possible to receive GSO packets from the wire if GRO is enabled.
1534 */
1535 if (RT_UNLIKELY(fSrc & INTNETTRUNKDIR_WIRE))
1536 {
1537 Log5(("vboxNetFltLinuxCanForwardAsGso: fSrc=wire\n"));
1538#ifdef VBOXNETFLT_WITH_GRO
1539 /*
1540 * The packet came from the wire and the driver has already consumed
1541 * the MAC header. We need to restore it.
1542 */
1543 pSkb->mac_len = skb_network_header(pSkb) - skb_mac_header(pSkb);
1544 skb_push(pSkb, pSkb->mac_len);
1545 Log5(("vboxNetFltLinuxCanForwardAsGso: mac_len=%d data=%p mac_header=%p network_header=%p\n",
1546 pSkb->mac_len, pSkb->data, skb_mac_header(pSkb), skb_network_header(pSkb)));
1547#else /* !VBOXNETFLT_WITH_GRO */
1548 /* Older kernels didn't have GRO. */
1549 return false;
1550#endif /* !VBOXNETFLT_WITH_GRO */
1551 }
1552 else
1553 {
1554 /*
1555 * skb_gso_segment does the following. Do we need to do it as well?
1556 */
1557#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1558 skb_reset_mac_header(pSkb);
1559 pSkb->mac_len = pSkb->network_header - pSkb->mac_header;
1560#else
1561 pSkb->mac.raw = pSkb->data;
1562 pSkb->mac_len = pSkb->nh.raw - pSkb->data;
1563#endif
1564 }
1565
1566 /*
1567 * Switch on the ethertype.
1568 */
1569 uEtherType = pSkb->protocol;
1570 if ( uEtherType == RT_H2N_U16_C(RTNET_ETHERTYPE_VLAN)
1571 && pSkb->mac_len == sizeof(RTNETETHERHDR) + sizeof(uint32_t))
1572 {
1573 uint16_t const *puEtherType = skb_header_pointer(pSkb, sizeof(RTNETETHERHDR) + sizeof(uint16_t), sizeof(uint16_t), &Buf);
1574 if (puEtherType)
1575 uEtherType = *puEtherType;
1576 }
1577 switch (uEtherType)
1578 {
1579 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV4):
1580 {
1581 unsigned int cbHdr;
1582 PCRTNETIPV4 pIPv4 = (PCRTNETIPV4)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv4), &Buf);
1583 if (RT_UNLIKELY(!pIPv4))
1584 {
1585 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv4 hdr\n"));
1586 return false;
1587 }
1588
1589 cbHdr = pIPv4->ip_hl * 4;
1590 cbTransport = RT_N2H_U16(pIPv4->ip_len);
1591 if (RT_UNLIKELY( cbHdr < RTNETIPV4_MIN_LEN
1592 || cbHdr > cbTransport ))
1593 {
1594 Log5(("vboxNetFltLinuxCanForwardAsGso: invalid IPv4 lengths: ip_hl=%u ip_len=%u\n", pIPv4->ip_hl, RT_N2H_U16(pIPv4->ip_len)));
1595 return false;
1596 }
1597 cbTransport -= cbHdr;
1598 offTransport = pSkb->mac_len + cbHdr;
1599 uProtocol = pIPv4->ip_p;
1600 if (uProtocol == RTNETIPV4_PROT_TCP)
1601 enmGsoType = PDMNETWORKGSOTYPE_IPV4_TCP;
1602 else if (uProtocol == RTNETIPV4_PROT_UDP)
1603 enmGsoType = PDMNETWORKGSOTYPE_IPV4_UDP;
1604 else /** @todo IPv6: 4to6 tunneling */
1605 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1606 break;
1607 }
1608
1609 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV6):
1610 {
1611 PCRTNETIPV6 pIPv6 = (PCRTNETIPV6)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv6), &Buf);
1612 if (RT_UNLIKELY(!pIPv6))
1613 {
1614 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv6 hdr\n"));
1615 return false;
1616 }
1617
1618 cbTransport = RT_N2H_U16(pIPv6->ip6_plen);
1619 offTransport = pSkb->mac_len + sizeof(RTNETIPV6);
1620 uProtocol = pIPv6->ip6_nxt;
1621 /** @todo IPv6: Dig our way out of the other headers. */
1622 if (uProtocol == RTNETIPV4_PROT_TCP)
1623 enmGsoType = PDMNETWORKGSOTYPE_IPV6_TCP;
1624 else if (uProtocol == RTNETIPV4_PROT_UDP)
1625 enmGsoType = PDMNETWORKGSOTYPE_IPV4_UDP;
1626 else
1627 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1628 break;
1629 }
1630
1631 default:
1632 Log5(("vboxNetFltLinuxCanForwardAsGso: uEtherType=%#x\n", RT_H2N_U16(uEtherType)));
1633 return false;
1634 }
1635
1636 if (enmGsoType == PDMNETWORKGSOTYPE_INVALID)
1637 {
1638 Log5(("vboxNetFltLinuxCanForwardAsGso: Unsupported protocol %d\n", uProtocol));
1639 return false;
1640 }
1641
1642 if (RT_UNLIKELY( offTransport + cbTransport <= offTransport
1643 || offTransport + cbTransport > pSkb->len
1644 || cbTransport < (uProtocol == RTNETIPV4_PROT_TCP ? RTNETTCP_MIN_LEN : RTNETUDP_MIN_LEN)) )
1645 {
1646 Log5(("vboxNetFltLinuxCanForwardAsGso: Bad transport length; off=%#x + cb=%#x => %#x; skb_len=%#x (%s)\n",
1647 offTransport, cbTransport, offTransport + cbTransport, pSkb->len, PDMNetGsoTypeName(enmGsoType) ));
1648 return false;
1649 }
1650
1651 /*
1652 * Check the TCP/UDP bits.
1653 */
1654 if (uProtocol == RTNETIPV4_PROT_TCP)
1655 {
1656 PCRTNETTCP pTcp = (PCRTNETTCP)skb_header_pointer(pSkb, offTransport, sizeof(Buf.Tcp), &Buf);
1657 if (RT_UNLIKELY(!pTcp))
1658 {
1659 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access TCP hdr\n"));
1660 return false;
1661 }
1662
1663 cbTransportHdr = pTcp->th_off * 4;
1664 if (RT_UNLIKELY( cbTransportHdr < RTNETTCP_MIN_LEN
1665 || cbTransportHdr > cbTransport
1666 || offTransport + cbTransportHdr >= UINT8_MAX
1667 || offTransport + cbTransportHdr >= pSkb->len ))
1668 {
1669 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for TCP header; off=%#x cb=%#x skb_len=%#x\n", offTransport, cbTransportHdr, pSkb->len));
1670 return false;
1671 }
1672
1673 }
1674 else
1675 {
1676 Assert(uProtocol == RTNETIPV4_PROT_UDP);
1677 cbTransportHdr = sizeof(RTNETUDP);
1678 if (RT_UNLIKELY( offTransport + cbTransportHdr >= UINT8_MAX
1679 || offTransport + cbTransportHdr >= pSkb->len ))
1680 {
1681 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for UDP header; off=%#x skb_len=%#x\n", offTransport, pSkb->len));
1682 return false;
1683 }
1684 }
1685
1686 /*
1687 * We're good, init the GSO context.
1688 */
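     /* cbHdrs spans everything up to and including the TCP/UDP header, while offHdr1
        and offHdr2 give the offsets of the IP and transport headers within the frame. */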
1689 pGsoCtx->u8Type = enmGsoType;
1690 pGsoCtx->cbHdrs = offTransport + cbTransportHdr;
1691 pGsoCtx->cbMaxSeg = skb_shinfo(pSkb)->gso_size;
1692 pGsoCtx->offHdr1 = pSkb->mac_len;
1693 pGsoCtx->offHdr2 = offTransport;
1694 pGsoCtx->au8Unused[0] = 0;
1695 pGsoCtx->au8Unused[1] = 0;
1696
1697 return true;
1698}
1699
1700/**
1701 * Forward the socket buffer as a GSO internal network frame.
1702 *
1703 * @returns IPRT status code.
1704 * @param pThis The net filter instance.
1705 * @param pSkb The GSO socket buffer.
1706 * @param fSrc The source.
1707  * @param    pGsoCtx         The GSO context describing the frame.
1708 */
1709static int vboxNetFltLinuxForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1710{
1711 int rc;
1712 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pSkb);
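     /* The SG segment count is bounded by MAX_SKB_FRAGS + 1 (page fragments plus the
        linear part); anything larger is treated as a malformed buffer and dropped. */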
1713 if (RT_LIKELY(cSegs <= MAX_SKB_FRAGS + 1))
1714 {
1715 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1716 if (RT_LIKELY(pSG))
1717 {
1718 vboxNetFltLinuxSkBufToSG(pThis, pSkb, pSG, cSegs, fSrc, pGsoCtx);
1719
1720 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1721 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1722
1723 vboxNetFltLinuxDestroySG(pSG);
1724 rc = VINF_SUCCESS;
1725 }
1726 else
1727 {
1728 Log(("VBoxNetFlt: Dropping the sk_buff (failure case).\n"));
1729 rc = VERR_NO_MEMORY;
1730 }
1731 }
1732 else
1733 {
1734 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1735 rc = VERR_INTERNAL_ERROR_3;
1736 }
1737
1738 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1739 dev_kfree_skb(pSkb);
1740 return rc;
1741}
1742
1743#endif /* VBOXNETFLT_WITH_GSO_RECV */
1744
1745/**
1746 * Worker for vboxNetFltLinuxForwardToIntNet.
1747 *
1748 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
1749 * @param pThis The net filter instance.
1750 * @param pBuf The socket buffer.
1751 * @param fSrc The source.
1752 */
1753static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
1754{
1755 int rc;
1756 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pBuf);
1757 if (cSegs <= MAX_SKB_FRAGS + 1)
1758 {
1759 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1760 if (RT_LIKELY(pSG))
1761 {
1762 if (fSrc & INTNETTRUNKDIR_WIRE)
1763 {
1764 /*
1765              * The packet came from the wire; the Ethernet header was stripped
1766              * by the device driver, so restore it before building the SG list.
1767 */
1768 skb_push(pBuf, ETH_HLEN);
1769 }
1770
1771 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc, NULL /*pGsoCtx*/);
1772
1773 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1774 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1775
1776 vboxNetFltLinuxDestroySG(pSG);
1777 rc = VINF_SUCCESS;
1778 }
1779 else
1780 {
1781 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
1782 rc = VERR_NO_MEMORY;
1783 }
1784 }
1785 else
1786 {
1787 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1788 rc = VERR_INTERNAL_ERROR_3;
1789 }
1790
1791 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1792 dev_kfree_skb(pBuf);
1793 return rc;
1794}
1795
1796/**
1797  * Forwards a socket buffer to the internal network, segmenting GSO frames when necessary.
1798  * @param    pBuf            The socket buffer.  This is consumed by this function.
1799 */
1800static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
1801{
1802 uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;
1803
1804#ifdef VBOXNETFLT_WITH_GSO
1805 if (skb_is_gso(pBuf))
1806 {
1807 PDMNETWORKGSO GsoCtx;
1808 Log3(("vboxNetFltLinuxForwardToIntNet: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x ip_summed=%d\n",
1809 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, pBuf->ip_summed));
1810# ifdef VBOXNETFLT_WITH_GSO_RECV
1811 if ( (skb_shinfo(pBuf)->gso_type & (SKB_GSO_UDP | SKB_GSO_TCPV6 | SKB_GSO_TCPV4))
1812 && vboxNetFltLinuxCanForwardAsGso(pThis, pBuf, fSrc, &GsoCtx) )
1813 vboxNetFltLinuxForwardAsGso(pThis, pBuf, fSrc, &GsoCtx);
1814 else
1815# endif
1816 {
1817 /* Need to segment the packet */
1818 struct sk_buff *pNext;
1819 struct sk_buff *pSegment = skb_gso_segment(pBuf, 0 /*supported features*/);
1820 if (IS_ERR(pSegment))
1821 {
1822 dev_kfree_skb(pBuf);
1823 LogRel(("VBoxNetFlt: Failed to segment a packet (%d).\n", PTR_ERR(pSegment)));
1824 return;
1825 }
1826
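             /* skb_gso_segment() hands back a singly linked list of non-GSO skbs; unlink
                and forward them one at a time, then free the original GSO buffer. */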
1827 for (; pSegment; pSegment = pNext)
1828 {
1829 Log3(("vboxNetFltLinuxForwardToIntNet: segment len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1830 pSegment->len, pSegment->data_len, pSegment->truesize, pSegment->next, skb_shinfo(pSegment)->nr_frags, skb_shinfo(pSegment)->gso_size, skb_shinfo(pSegment)->gso_segs, skb_shinfo(pSegment)->gso_type, skb_shinfo(pSegment)->frag_list, pSegment->pkt_type));
1831 pNext = pSegment->next;
1832 pSegment->next = 0;
1833 vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
1834 }
1835 dev_kfree_skb(pBuf);
1836 }
1837 }
1838 else
1839#endif /* VBOXNETFLT_WITH_GSO */
1840 {
1841 if (pBuf->ip_summed == CHECKSUM_PARTIAL && pBuf->pkt_type == PACKET_OUTGOING)
1842 {
1843#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1844 /*
1845              * Work around a problem with the CentOS 4.7 and 5.2 kernels (2.6.9
1846              * and 2.6.18): they pass a wrong 'h' pointer down. Take the IP
1847              * header length from the header itself and reconstruct the 'h'
1848              * pointer to the TCP (or whatever) header.
1849 */
1850 unsigned char *tmp = pBuf->h.raw;
1851 if (pBuf->h.raw == pBuf->nh.raw && pBuf->protocol == htons(ETH_P_IP))
1852 pBuf->h.raw = pBuf->nh.raw + pBuf->nh.iph->ihl * 4;
1853#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
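             /* The host left the checksum for later (CHECKSUM_PARTIAL); finish it in
                software here since no NIC will do it on the internal network path. */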
1854 if (VBOX_SKB_CHECKSUM_HELP(pBuf))
1855 {
1856 LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
1857 dev_kfree_skb(pBuf);
1858 return;
1859 }
1860#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1861 /* Restore the original (wrong) pointer. */
1862 pBuf->h.raw = tmp;
1863#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1864 }
1865 vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
1866 }
1867}
1868
1869#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1870/**
1871 * Work queue handler that forwards the socket buffers queued by
1872 * vboxNetFltLinuxPacketHandler to the internal network.
1873 *
1874  * @param    pWork           The work queue item (the XmitTask member of the instance).
1875 */
1876# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1877static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
1878# else
1879static void vboxNetFltLinuxXmitTask(void *pWork)
1880# endif
1881{
1882 PVBOXNETFLTINS pThis = VBOX_FLT_XT_TO_INST(pWork);
1883 struct sk_buff *pBuf;
1884
1885 Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
1886
1887 /*
1888 * Active? Retain the instance and increment the busy counter.
1889 */
1890 if (vboxNetFltTryRetainBusyActive(pThis))
1891 {
1892 while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != NULL)
1893 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1894
1895 vboxNetFltRelease(pThis, true /* fBusy */);
1896 }
1897 else
1898 {
1899 /** @todo Shouldn't we just drop the packets here? There is little point in
1900 * making them accumulate when the VM is paused and it'll only waste
1901          * kernel memory anyway... Hmm, maybe wait a short while (2-5 secs)
1902          * before starting to drain the packets (goes for the intnet ring buf
1903 * too)? */
1904 }
1905}
1906#endif /* !VBOXNETFLT_LINUX_NO_XMIT_QUEUE */
1907
1908/**
1909 * Reports the GSO capabilities of the hardware NIC.
1910 *
1911  * @param    pThis           The net filter instance.  The caller holds a
1912  *                           reference to this.
1913 */
1914static void vboxNetFltLinuxReportNicGsoCapabilities(PVBOXNETFLTINS pThis)
1915{
1916#ifdef VBOXNETFLT_WITH_GSO_XMIT_WIRE
1917 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
1918 {
1919 struct net_device *pDev;
1920 PINTNETTRUNKSWPORT pSwitchPort;
1921 unsigned int fFeatures;
1922 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1923
1924 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1925
1926         pSwitchPort = pThis->pSwitchPort; /* this doesn't need to be here, but it does no harm. */
1927 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1928 if (pDev)
1929 fFeatures = pDev->features;
1930 else
1931 fFeatures = 0;
1932
1933 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1934
1935 if (pThis->pSwitchPort)
1936 {
1937 /* Set/update the GSO capabilities of the NIC. */
1938 uint32_t fGsoCapabilites = 0;
1939 if (fFeatures & NETIF_F_TSO)
1940 fGsoCapabilites |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP);
1941 if (fFeatures & NETIF_F_TSO6)
1942 fGsoCapabilites |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP);
1943# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
1944 if (fFeatures & NETIF_F_UFO)
1945 fGsoCapabilites |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP);
1946 if (fFeatures & NETIF_F_UFO)
1947 fGsoCapabilites |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP);
1948# endif
1949 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort, fGsoCapabilites, INTNETTRUNKDIR_WIRE);
1950 }
1951
1952 vboxNetFltRelease(pThis, true /*fBusy*/);
1953 }
1954#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE */
1955}
1956
1957/**
1958  * Helper that determines whether the host (ignoring us) is operating the
1959 * interface in promiscuous mode or not.
1960 */
1961static bool vboxNetFltLinuxPromiscuous(PVBOXNETFLTINS pThis)
1962{
1963 bool fRc = false;
1964 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
1965 if (pDev)
1966 {
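         /* pDev->promiscuity counts every holder of promiscuous mode; subtract our own
            reference (if we set one) so only the host's interest is reported. */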
1967 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
1968 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
1969 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
1970 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1971 }
1972 return fRc;
1973}
1974
1975#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1976/**
1977 * Helper for detecting TAP devices.
1978 */
1979static bool vboxNetFltIsTapDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
1980{
1981 if (pDev->ethtool_ops && pDev->ethtool_ops->get_drvinfo)
1982 {
1983 struct ethtool_drvinfo Info;
1984
1985 memset(&Info, 0, sizeof(Info));
1986 Info.cmd = ETHTOOL_GDRVINFO;
1987 pDev->ethtool_ops->get_drvinfo(pDev, &Info);
1988 Log3(("vboxNetFltIsTapDevice: driver=%s version=%s bus_info=%s\n",
1989 Info.driver, Info.version, Info.bus_info));
1990
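         /* Compare 4 bytes so the terminating NUL is included, i.e. an exact match
            against the "tun" driver exposing a "tap" bus_info. */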
1991 return !strncmp(Info.driver, "tun", 4)
1992 && !strncmp(Info.bus_info, "tap", 4);
1993 }
1994
1995 return false;
1996}
1997
1998/**
1999 * Helper for updating the link state of TAP devices.
2000 * Only TAP devices are affected.
2001 */
2002static void vboxNetFltSetTapLinkState(PVBOXNETFLTINS pThis, struct net_device *pDev, bool fLinkUp)
2003{
2004 if (vboxNetFltIsTapDevice(pThis, pDev))
2005 {
2006         Log3(("vboxNetFltSetTapLinkState: setting tap device link state %s\n",
2007               fLinkUp ? "up" : "down"));
2008 netif_tx_lock_bh(pDev);
2009 if (fLinkUp)
2010 netif_carrier_on(pDev);
2011 else
2012 netif_carrier_off(pDev);
2013 netif_tx_unlock_bh(pDev);
2014 }
2015}
2016#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
2017DECLINLINE(void) vboxNetFltSetTapLinkState(PVBOXNETFLTINS pThis, struct net_device *pDev, bool fLinkUp)
2018{
2019 /* Nothing to do for pre-2.6.36 kernels. */
2020}
2021#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
2022
2023/**
2024 * Internal worker for vboxNetFltLinuxNotifierCallback.
2025 *
2026 * @returns VBox status code.
2027 * @param pThis The instance.
2028  * @param    pDev            The net_device to attach to.  A reference is
2029  *                           retained for the duration of the attachment.
2030 */
2031static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
2032{
2033 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2034 LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));
2035
2036 /*
2037 * Retain and store the device.
2038 */
2039 dev_hold(pDev);
2040
2041 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2042 ASMAtomicUoWritePtr(&pThis->u.s.pDev, pDev);
2043 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2044
2045 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n",
2046 pDev, pDev->name,
2047#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2048 netdev_refcnt_read(pDev)
2049#else
2050 atomic_read(&pDev->refcnt)
2051#endif
2052 ));
2053 Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2054 pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
2055
2056 /* Get the mac address while we still have a valid net_device reference. */
2057 memcpy(&pThis->u.s.MacAddr, pDev->dev_addr, sizeof(pThis->u.s.MacAddr));
2058
2059 /*
2060 * Install a packet filter for this device with a protocol wildcard (ETH_P_ALL).
2061 */
2062 pThis->u.s.PacketType.type = __constant_htons(ETH_P_ALL);
2063 pThis->u.s.PacketType.dev = pDev;
2064 pThis->u.s.PacketType.func = vboxNetFltLinuxPacketHandler;
2065 dev_add_pack(&pThis->u.s.PacketType);
2066
2067#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2068 vboxNetFltLinuxHookDev(pThis, pDev);
2069#endif
2070#ifdef VBOXNETFLT_WITH_QDISC
2071 vboxNetFltLinuxQdiscInstall(pThis, pDev);
2072#endif /* VBOXNETFLT_WITH_QDISC */
2073
2074 /*
2075      * If attaching to a TAP interface we need to bring the link state up
2076      * starting with the 2.6.36 kernel.
2077 */
2078 vboxNetFltSetTapLinkState(pThis, pDev, true);
2079
2080 /*
2081      * Set indicators that require the spinlock.  Be a bit paranoid about racing
2082      * the device notification handler.
2083 */
2084 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2085 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2086 if (pDev)
2087 {
2088 ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
2089 ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
2090 pDev = NULL; /* don't dereference it */
2091 }
2092 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2093 Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));
2094
2095 /*
2096 * If the above succeeded report GSO capabilities, if not undo and
2097 * release the device.
2098 */
2099 if (!pDev)
2100 {
2101 Assert(pThis->pSwitchPort);
2102 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
2103 {
2104 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2105 pThis->pSwitchPort->pfnReportMacAddress(pThis->pSwitchPort, &pThis->u.s.MacAddr);
2106 pThis->pSwitchPort->pfnReportPromiscuousMode(pThis->pSwitchPort, vboxNetFltLinuxPromiscuous(pThis));
2107 pThis->pSwitchPort->pfnReportNoPreemptDsts(pThis->pSwitchPort, INTNETTRUNKDIR_WIRE | INTNETTRUNKDIR_HOST);
2108 vboxNetFltRelease(pThis, true /*fBusy*/);
2109 }
2110 }
2111 else
2112 {
2113#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2114 vboxNetFltLinuxUnhookDev(pThis, pDev);
2115#endif
2116#ifdef VBOXNETFLT_WITH_QDISC
2117 vboxNetFltLinuxQdiscRemove(pThis, pDev);
2118#endif /* VBOXNETFLT_WITH_QDISC */
2119 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2120 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2121 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2122 dev_put(pDev);
2123 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n",
2124 pDev, pDev->name,
2125#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2126 netdev_refcnt_read(pDev)
2127#else
2128 atomic_read(&pDev->refcnt)
2129#endif
2130 ));
2131 }
2132
2133 LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.MacAddr), &pThis->u.s.MacAddr));
2134 return VINF_SUCCESS;
2135}
2136
2137
2138static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
2139{
2140 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2141
2142 Assert(!pThis->fDisconnectedFromHost);
2143
2144#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2145 vboxNetFltLinuxUnhookDev(pThis, pDev);
2146#endif
2147#ifdef VBOXNETFLT_WITH_QDISC
2148 vboxNetFltLinuxQdiscRemove(pThis, pDev);
2149#endif /* VBOXNETFLT_WITH_QDISC */
2150
2151 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2152 ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
2153 ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
2154 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2155 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2156
2157 dev_remove_pack(&pThis->u.s.PacketType);
2158#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2159 skb_queue_purge(&pThis->u.s.XmitQueue);
2160#endif
2161 Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2162 Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n",
2163 pDev, pDev->name,
2164#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2165 netdev_refcnt_read(pDev)
2166#else
2167 atomic_read(&pDev->refcnt)
2168#endif
2169 ));
2170 dev_put(pDev);
2171
2172 return NOTIFY_OK;
2173}
2174
2175static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
2176{
2177 /* Check if we are not suspended and promiscuous mode has not been set. */
2178 if ( pThis->enmTrunkState == INTNETTRUNKIFSTATE_ACTIVE
2179 && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2180 {
2181 /* Note that there is no need for locking as the kernel got hold of the lock already. */
2182 dev_set_promiscuity(pDev, 1);
2183 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
2184 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2185 }
2186 else
2187 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2188 return NOTIFY_OK;
2189}
2190
2191static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
2192{
2193     /* Undo promiscuous mode if we have set it. */
2194 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2195 {
2196 /* Note that there is no need for locking as the kernel got hold of the lock already. */
2197 dev_set_promiscuity(pDev, -1);
2198 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
2199 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2200 }
2201 else
2202 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2203 return NOTIFY_OK;
2204}
2205
2206#ifdef LOG_ENABLED
2207/** Stringify the NETDEV_XXX constants. */
2208static const char *vboxNetFltLinuxGetNetDevEventName(unsigned long ulEventType)
2209{
2210     const char *pszEvent = "NETDEV_<unknown>";
2211 switch (ulEventType)
2212 {
2213 case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
2214 case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
2215 case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
2216 case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
2217 case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
2218 case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
2219 case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
2220 case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
2221 case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
2222 case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
2223# ifdef NETDEV_FEAT_CHANGE
2224 case NETDEV_FEAT_CHANGE: pszEvent = "NETDEV_FEAT_CHANGE"; break;
2225# endif
2226 }
2227 return pszEvent;
2228}
2229#endif /* LOG_ENABLED */
2230
2231/**
2232 * Callback for listening to netdevice events.
2233 *
2234  * This handles rediscovery, cleanup on unregistration, promiscuity on
2235  * up/down, and GSO feature changes reported via ethtool.
2236 *
2237 * @returns NOTIFY_OK
2238 * @param self Pointer to our notifier registration block.
2239 * @param ulEventType The event.
2240 * @param ptr Event specific, but it is usually the device it
2241 * relates to.
2242 */
2243static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
2244
2245{
2246 PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);
2247 struct net_device *pDev = (struct net_device *)ptr;
2248 int rc = NOTIFY_OK;
2249
2250 Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2251 vboxNetFltLinuxGetNetDevEventName(ulEventType), ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
2252 if ( ulEventType == NETDEV_REGISTER
2253 && !strcmp(pDev->name, pThis->szName))
2254 {
2255 vboxNetFltLinuxAttachToInterface(pThis, pDev);
2256 }
2257 else
2258 {
2259 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2260 if (pDev == ptr)
2261 {
2262 switch (ulEventType)
2263 {
2264 case NETDEV_UNREGISTER:
2265 rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
2266 break;
2267 case NETDEV_UP:
2268 rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
2269 break;
2270 case NETDEV_GOING_DOWN:
2271 rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
2272 break;
2273 case NETDEV_CHANGENAME:
2274 break;
2275#ifdef NETDEV_FEAT_CHANGE
2276 case NETDEV_FEAT_CHANGE:
2277 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2278 break;
2279#endif
2280 }
2281 }
2282 }
2283
2284 return rc;
2285}
2286
2287bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
2288{
2289 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
2290}
2291
2292int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, void *pvIfData, PINTNETSG pSG, uint32_t fDst)
2293{
2294 struct net_device * pDev;
2295 int err;
2296 int rc = VINF_SUCCESS;
2297 NOREF(pvIfData);
2298
2299 LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));
2300
2301 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2302 if (pDev)
2303 {
2304 /*
2305 * Create a sk_buff for the gather list and push it onto the wire.
2306 */
2307 if (fDst & INTNETTRUNKDIR_WIRE)
2308 {
2309 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
2310 if (pBuf)
2311 {
2312 vboxNetFltDumpPacket(pSG, true, "wire", 1);
2313 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2314 Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
2315 err = dev_queue_xmit(pBuf);
2316 if (err)
2317 rc = RTErrConvertFromErrno(err);
2318 }
2319 else
2320 rc = VERR_NO_MEMORY;
2321 }
2322
2323 /*
2324 * Create a sk_buff for the gather list and push it onto the host stack.
2325 */
2326 if (fDst & INTNETTRUNKDIR_HOST)
2327 {
2328 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
2329 if (pBuf)
2330 {
2331 vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
2332 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2333 Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
2334 err = netif_rx_ni(pBuf);
2335 if (err)
2336 rc = RTErrConvertFromErrno(err);
2337 }
2338 else
2339 rc = VERR_NO_MEMORY;
2340 }
2341
2342 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2343 }
2344
2345 return rc;
2346}
2347
2348
2349void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
2350{
2351 struct net_device * pDev;
2352
2353 LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
2354 pThis, pThis->szName, fActive?"true":"false",
2355 pThis->fDisablePromiscuous?"true":"false"));
2356
2357 if (pThis->fDisablePromiscuous)
2358 return;
2359
2360 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2361 if (pDev)
2362 {
2363 /*
2364          * This API is a bit weird; the best reference is the code.
2365          *
2366          * Also, we have a bit of a race condition wrt the maintenance of
2367          * the host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
2368 */
2369#ifdef LOG_ENABLED
2370 u_int16_t fIf;
2371 unsigned const cPromiscBefore = pDev->promiscuity;
2372#endif
2373 if (fActive)
2374 {
2375 Assert(!pThis->u.s.fPromiscuousSet);
2376
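             /* Unlike the netdevice notifier callbacks, we are not called with the
                RTNL lock held here, so take it around dev_set_promiscuity(). */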
2377 rtnl_lock();
2378 dev_set_promiscuity(pDev, 1);
2379 rtnl_unlock();
2380 pThis->u.s.fPromiscuousSet = true;
2381 Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2382 }
2383 else
2384 {
2385 if (pThis->u.s.fPromiscuousSet)
2386 {
2387 rtnl_lock();
2388 dev_set_promiscuity(pDev, -1);
2389 rtnl_unlock();
2390 Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2391 }
2392 pThis->u.s.fPromiscuousSet = false;
2393
2394#ifdef LOG_ENABLED
2395 fIf = dev_get_flags(pDev);
2396 Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, pDev->promiscuity));
2397#endif
2398 }
2399
2400 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2401 }
2402}
2403
2404
2405int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
2406{
2407#ifdef VBOXNETFLT_WITH_QDISC
2408 vboxNetFltLinuxQdiscRemove(pThis, NULL);
2409#endif /* VBOXNETFLT_WITH_QDISC */
2410 /*
2411      * Remove the packet handler when we get disconnected from the internal
2412      * switch, as we don't want the handler to forward packets to a disconnected switch.
2413 */
2414 dev_remove_pack(&pThis->u.s.PacketType);
2415 return VINF_SUCCESS;
2416}
2417
2418
2419int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
2420{
2421 /*
2422 * Report the GSO capabilities of the host and device (if connected).
2423 * Note! No need to mark ourselves busy here.
2424 */
2425 /** @todo duplicate work here now? Attach */
2426#if defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
2427 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort,
2428 0
2429 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP)
2430 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP)
2431# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
2432 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP)
2433 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP)
2434# endif
2435 , INTNETTRUNKDIR_HOST);
2436
2437#endif
2438 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2439
2440 return VINF_SUCCESS;
2441}
2442
2443
2444void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
2445{
2446 struct net_device *pDev;
2447 bool fRegistered;
2448 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2449
2450#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2451 vboxNetFltLinuxUnhookDev(pThis, NULL);
2452#endif
2453
2454 /** @todo This code may race vboxNetFltLinuxUnregisterDevice (very very
2455 * unlikely, but none the less). Since it doesn't actually update the
2456 * state (just reads it), it is likely to panic in some interesting
2457 * ways. */
2458
2459 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2460 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2461 fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
2462 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2463
2464 if (fRegistered)
2465 {
2466 vboxNetFltSetTapLinkState(pThis, pDev, false);
2467
2468#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2469 skb_queue_purge(&pThis->u.s.XmitQueue);
2470#endif
2471 Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2472 Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n",
2473 pDev, pDev->name,
2474#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2475 netdev_refcnt_read(pDev)
2476#else
2477 atomic_read(&pDev->refcnt)
2478#endif
2479 ));
2480 dev_put(pDev);
2481 }
2482 Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
2483 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2484 module_put(THIS_MODULE);
2485}
2486
2487
2488int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
2489{
2490 int err;
2491 NOREF(pvContext);
2492
2493 pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
2494 err = register_netdevice_notifier(&pThis->u.s.Notifier);
2495 if (err)
2496 return VERR_INTNET_FLT_IF_FAILED;
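    /* register_netdevice_notifier() replays NETDEV_REGISTER for devices that already
       exist, so if the named interface is present the callback has attached to it by
       now and set fRegistered. */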
2497 if (!pThis->u.s.fRegistered)
2498 {
2499 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2500 LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
2501 return VERR_INTNET_FLT_IF_NOT_FOUND;
2502 }
2503
2504 Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
2505 if ( pThis->fDisconnectedFromHost
2506 || !try_module_get(THIS_MODULE))
2507 return VERR_INTNET_FLT_IF_FAILED;
2508
2509 return VINF_SUCCESS;
2510}
2511
2512int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
2513{
2514 /*
2515 * Init the linux specific members.
2516 */
2517 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2518 pThis->u.s.fRegistered = false;
2519 pThis->u.s.fPromiscuousSet = false;
2520 memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
2521#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2522 skb_queue_head_init(&pThis->u.s.XmitQueue);
2523# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
2524 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
2525# else
2526 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
2527# endif
2528#endif
2529
2530 return VINF_SUCCESS;
2531}
2532
2533
2534void vboxNetFltPortOsNotifyMacAddress(PVBOXNETFLTINS pThis, void *pvIfData, PCRTMAC pMac)
2535{
2536 NOREF(pThis); NOREF(pvIfData); NOREF(pMac);
2537}
2538
2539
2540int vboxNetFltPortOsConnectInterface(PVBOXNETFLTINS pThis, void *pvIf, void **pvIfData)
2541{
2542 /* Nothing to do */
2543 NOREF(pThis); NOREF(pvIf); NOREF(pvIfData);
2544 return VINF_SUCCESS;
2545}
2546
2547
2548int vboxNetFltPortOsDisconnectInterface(PVBOXNETFLTINS pThis, void *pvIfData)
2549{
2550 /* Nothing to do */
2551 NOREF(pThis); NOREF(pvIfData);
2552 return VINF_SUCCESS;
2553}
2554