VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@38050

Last change on this file was r38050, checked in by vboxsync on 2011-07-19:

netflt: RHEL5 fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 87.8 KB
1/* $Id: VBoxNetFlt-linux.c 38050 2011-07-19 08:18:52Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
22#define VBOXNETFLT_LINUX_NO_XMIT_QUEUE
23#include "the-linux-kernel.h"
24#include "version-generated.h"
25#include "product-generated.h"
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/rtnetlink.h>
29#include <linux/miscdevice.h>
30#include <linux/ip.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <VBox/intnetinline.h>
35#include <VBox/vmm/pdmnetinline.h>
36#include <VBox/param.h>
37#include <iprt/alloca.h>
38#include <iprt/assert.h>
39#include <iprt/spinlock.h>
40#include <iprt/semaphore.h>
41#include <iprt/initterm.h>
42#include <iprt/process.h>
43#include <iprt/mem.h>
44#include <iprt/net.h>
45#include <iprt/log.h>
46#include <iprt/mp.h>
47#include <iprt/mem.h>
48#include <iprt/time.h>
49
50#define VBOXNETFLT_OS_SPECFIC 1
51#include "../VBoxNetFltInternal.h"
52
53#define VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
54#ifdef CONFIG_NET_SCHED
55/*# define VBOXNETFLT_WITH_QDISC   Uncomment this to enable qdisc support */
56# ifdef VBOXNETFLT_WITH_QDISC
57# include <net/pkt_sched.h>
58# endif /* VBOXNETFLT_WITH_QDISC */
59#endif
60
61
62/*******************************************************************************
63* Defined Constants And Macros *
64*******************************************************************************/
65#define VBOX_FLT_NB_TO_INST(pNB) RT_FROM_MEMBER(pNB, VBOXNETFLTINS, u.s.Notifier)
66#define VBOX_FLT_PT_TO_INST(pPT) RT_FROM_MEMBER(pPT, VBOXNETFLTINS, u.s.PacketType)
67#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
68# define VBOX_FLT_XT_TO_INST(pXT) RT_FROM_MEMBER(pXT, VBOXNETFLTINS, u.s.XmitTask)
69#endif
70
71#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
72# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
73# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
74#else
75# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
76# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
77#endif
78
79#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
80# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
81#else
82# define CHECKSUM_PARTIAL CHECKSUM_HW
83# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
84# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
85# else
86# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
87# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
88# else
89# define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
90# endif
91/* Versions prior to 2.6.10 use stats for both bstats and qstats */
92# define bstats stats
93# define qstats stats
94# endif
95#endif
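/*
 * Illustrative sketch (not part of the original file): whichever kernel this
 * is built against, the ladder above lets callers complete a partially
 * computed checksum with a single call. The helper name is hypothetical and
 * the "non-zero means failure" convention is an assumption based on how the
 * driver uses the wrapper.
 */
#if 0
static int vboxNetFltExampleCompleteCsum(struct sk_buff *pSkb)
{
    if (pSkb->ip_summed == CHECKSUM_PARTIAL) /* CHECKSUM_HW on pre-2.6.19 kernels, see above */
        if (VBOX_SKB_CHECKSUM_HELP(pSkb))
            return -1; /* failed to compute the checksum */
    return 0;
}
#endif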
96
97#ifdef VBOXNETFLT_WITH_QDISC
98# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)
99static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
100{
101 kfree_skb(skb);
102 sch->stats.drops++;
103
104 return NET_XMIT_DROP;
105}
106# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) */
107#endif /* VBOXNETFLT_WITH_QDISC */
108
109#ifndef NET_IP_ALIGN
110# define NET_IP_ALIGN 2
111#endif
112
113#if 0
114/** Create scatter / gather segments for fragments. When not used, we will
115 * linearize the socket buffer before creating the internal networking SG. */
116# define VBOXNETFLT_SG_SUPPORT 1
117#endif
118
119#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
120/** Indicates that the linux kernel may send us GSO frames. */
121# define VBOXNETFLT_WITH_GSO 1
122
123/** This enables the transmission of GSO frames from the internal network
124 * to the host. */
125# define VBOXNETFLT_WITH_GSO_XMIT_HOST 1
126
127# if 0 /** @todo This is currently disabled because it causes a performance loss of 5-10%. */
128/** This enables the transmission of GSO frames from the internal network
129 * to the wire. */
130# define VBOXNETFLT_WITH_GSO_XMIT_WIRE 1
131# endif
132
133/** This enables the forwarding/flooding of GSO frames from the host
134 * to the internal network. */
135# define VBOXNETFLT_WITH_GSO_RECV 1
136
137#endif
138
139#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
140/** This enables or disables handling of GSO frames coming from the wire (GRO). */
141# define VBOXNETFLT_WITH_GRO 1
142#endif
143/*
144 * GRO support was backported to RHEL 5.4
145 */
146#ifdef RHEL_RELEASE_CODE
147# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)
148# define VBOXNETFLT_WITH_GRO 1
149# endif
150#endif
151
152/*******************************************************************************
153* Internal Functions *
154*******************************************************************************/
155static int VBoxNetFltLinuxInit(void);
156static void VBoxNetFltLinuxUnload(void);
157static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf);
158
159
160/*******************************************************************************
161* Global Variables *
162*******************************************************************************/
163/**
164 * The (common) global data.
165 */
166static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
167
168module_init(VBoxNetFltLinuxInit);
169module_exit(VBoxNetFltLinuxUnload);
170
171MODULE_AUTHOR(VBOX_VENDOR);
172MODULE_DESCRIPTION(VBOX_PRODUCT " Network Filter Driver");
173MODULE_LICENSE("GPL");
174#ifdef MODULE_VERSION
175MODULE_VERSION(VBOX_VERSION_STRING " (" RT_XSTR(INTNETTRUNKIFPORT_VERSION) ")");
176#endif
177
178
179#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) && defined(LOG_ENABLED)
180unsigned dev_get_flags(const struct net_device *dev)
181{
182 unsigned flags;
183
184 flags = (dev->flags & ~(IFF_PROMISC |
185 IFF_ALLMULTI |
186 IFF_RUNNING)) |
187 (dev->gflags & (IFF_PROMISC |
188 IFF_ALLMULTI));
189
190 if (netif_running(dev) && netif_carrier_ok(dev))
191 flags |= IFF_RUNNING;
192
193 return flags;
194}
195#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
196
197
198#ifdef VBOXNETFLT_WITH_QDISC
199//#define QDISC_LOG(x) printk x
200# define QDISC_LOG(x) do { } while (0)
201
202# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
203# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops)
204# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
205# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops, parent)
206# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
207# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, queue, ops, parent)
208# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) */
209# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(queue, ops, parent)
210# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) */
211
212# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
213# define qdisc_dev(qdisc) (qdisc->dev)
214# define qdisc_pkt_len(skb) (skb->len)
215# define QDISC_GET(dev) (dev->qdisc_sleeping)
216# else
217# define QDISC_GET(dev) (netdev_get_tx_queue(dev, 0)->qdisc_sleeping)
218# endif
219
220# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
221# define QDISC_SAVED_NUM(dev) 1
222# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
223# define QDISC_SAVED_NUM(dev) dev->num_tx_queues
224# else
225# define QDISC_SAVED_NUM(dev) dev->num_tx_queues+1
226# endif
227
228# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
229# define QDISC_IS_BUSY(dev, qdisc) test_bit(__LINK_STATE_SCHED, &dev->state)
230# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
231# define QDISC_IS_BUSY(dev, qdisc) (test_bit(__QDISC_STATE_RUNNING, &qdisc->state) || \
232 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
233# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */
234# define QDISC_IS_BUSY(dev, qdisc) (qdisc_is_running(qdisc) || \
235 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
236# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */
237
238struct VBoxNetQDiscPriv
239{
240 /** Pointer to the single child qdisc. */
241 struct Qdisc *pChild;
242 /*
243 * Technically it is possible to have different qdiscs for different TX
244 * queues so we have to save them all.
245 */
246 /** Pointer to the array of saved qdiscs. */
247 struct Qdisc **ppSaved;
248 /** Pointer to the net filter instance. */
249 PVBOXNETFLTINS pVBoxNetFlt;
250};
251typedef struct VBoxNetQDiscPriv *PVBOXNETQDISCPRIV;
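/*
 * Illustrative sketch (not part of the original file): qdisc_priv() resolves
 * to pointer arithmetic on the qdisc allocation, so every handler below can
 * reach the private data above in O(1). The helper name is hypothetical.
 */
#if 0
static PVBOXNETQDISCPRIV vboxNetFltExampleGetPriv(struct Qdisc *sch)
{
    /* Equivalent to (char *)sch + QDISC_ALIGN(sizeof(struct Qdisc)). */
    return (PVBOXNETQDISCPRIV)qdisc_priv(sch);
}
#endif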
252
253//#define VBOXNETFLT_QDISC_ENQUEUE
254static int vboxNetFltQdiscEnqueue(struct sk_buff *skb, struct Qdisc *sch)
255{
256 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
257 int rc;
258
259# ifdef VBOXNETFLT_QDISC_ENQUEUE
260 if (VALID_PTR(pPriv->pVBoxNetFlt))
261 {
262 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
263 PCRTNETETHERHDR pEtherHdr;
264 PINTNETTRUNKSWPORT pSwitchPort;
265 uint32_t cbHdrs = skb_headlen(skb);
266
267 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
268 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(skb, 0, cbHdrs, &abHdrBuf[0]);
269 if ( pEtherHdr
270 && (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) != NULL
271 && VALID_PTR(pSwitchPort)
272 && cbHdrs >= 6)
273 {
274 /** @todo consider reference counting, etc. */
275 INTNETSWDECISION enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
276 if (enmDecision == INTNETSWDECISION_INTNET)
277 {
278 struct sk_buff *pBuf = skb_copy(skb, GFP_ATOMIC);
279 pBuf->pkt_type = PACKET_OUTGOING;
280 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
281 qdisc_drop(skb, sch);
282 ++sch->bstats.packets;
283 sch->bstats.bytes += qdisc_pkt_len(skb);
284 return NET_XMIT_SUCCESS;
285 }
286 }
287 }
288# endif /* VBOXNETFLT_QDISC_ENQUEUE */
289 rc = pPriv->pChild->enqueue(skb, pPriv->pChild);
290 if (rc == NET_XMIT_SUCCESS)
291 {
292 ++sch->q.qlen;
293 ++sch->bstats.packets;
294 sch->bstats.bytes += qdisc_pkt_len(skb);
295 }
296 else
297 ++sch->qstats.drops;
298 return rc;
299}
300
301static struct sk_buff *vboxNetFltQdiscDequeue(struct Qdisc *sch)
302{
303 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
304# ifdef VBOXNETFLT_QDISC_ENQUEUE
305 --sch->q.qlen;
306 return pPriv->pChild->dequeue(pPriv->pChild);
307# else /* VBOXNETFLT_QDISC_ENQUEUE */
308 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
309 PCRTNETETHERHDR pEtherHdr;
310 PINTNETTRUNKSWPORT pSwitchPort;
311 struct sk_buff *pSkb;
312
313 QDISC_LOG(("vboxNetFltDequeue: Enter pThis=%p\n", pPriv->pVBoxNetFlt));
314
315 while ((pSkb = pPriv->pChild->dequeue(pPriv->pChild)) != NULL)
316 {
317 struct sk_buff *pBuf;
318 INTNETSWDECISION enmDecision;
319 uint32_t cbHdrs;
320
321 --sch->q.qlen;
322
323 if (!VALID_PTR(pPriv->pVBoxNetFlt))
324 break;
325
326 cbHdrs = skb_headlen(pSkb);
327 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
328 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
329 if ( !pEtherHdr
330 || (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) == NULL
331 || !VALID_PTR(pSwitchPort)
332 || cbHdrs < 6)
333 break;
334
335 /** @todo consider reference counting, etc. */
336 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
337 if (enmDecision != INTNETSWDECISION_INTNET)
338 break;
339
340 pBuf = skb_copy(pSkb, GFP_ATOMIC);
341 pBuf->pkt_type = PACKET_OUTGOING;
342 QDISC_LOG(("vboxNetFltDequeue: pThis=%p\n", pPriv->pVBoxNetFlt));
343 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
344 qdisc_drop(pSkb, sch);
345 QDISC_LOG(("VBoxNetFlt: Packet for %02x:%02x:%02x:%02x:%02x:%02x dropped\n",
346 pSkb->data[0], pSkb->data[1], pSkb->data[2],
347 pSkb->data[3], pSkb->data[4], pSkb->data[5]));
348 }
349
350 return pSkb;
351# endif /* VBOXNETFLT_QDISC_ENQUEUE */
352}
353
354# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
355static int vboxNetFltQdiscRequeue(struct sk_buff *skb, struct Qdisc *sch)
356{
357 int rc;
358 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
359
360 rc = pPriv->pChild->ops->requeue(skb, pPriv->pChild);
361 if (rc == 0)
362 {
363 sch->q.qlen++;
364# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
365 sch->qstats.requeues++;
366# endif
367 }
368
369 return rc;
370}
371# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
372
373static unsigned int vboxNetFltQdiscDrop(struct Qdisc *sch)
374{
375 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
376 unsigned int cbLen;
377
378 if (pPriv->pChild->ops->drop)
379 {
380 cbLen = pPriv->pChild->ops->drop(pPriv->pChild);
381 if (cbLen != 0)
382 {
383 ++sch->qstats.drops;
384 --sch->q.qlen;
385 return cbLen;
386 }
387 }
388
389 return 0;
390}
391
392# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
393static int vboxNetFltQdiscInit(struct Qdisc *sch, struct rtattr *opt)
394# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
395static int vboxNetFltQdiscInit(struct Qdisc *sch, struct nlattr *opt)
396# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
397{
398 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
399 struct net_device *pDev = qdisc_dev(sch);
400
401 pPriv->pVBoxNetFlt = NULL;
402
403 pPriv->ppSaved = kcalloc(QDISC_SAVED_NUM(pDev), sizeof(pPriv->ppSaved[0]),
404 GFP_KERNEL);
405 if (!pPriv->ppSaved)
406 return -ENOMEM;
407
408 pPriv->pChild = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
409 &pfifo_qdisc_ops,
410 TC_H_MAKE(TC_H_MAJ(sch->handle),
411 TC_H_MIN(1)));
412 if (!pPriv->pChild)
413 {
414 kfree(pPriv->ppSaved);
415 pPriv->ppSaved = NULL;
416 return -ENOMEM;
417 }
418
419 return 0;
420}
421
422static void vboxNetFltQdiscReset(struct Qdisc *sch)
423{
424 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
425
426 qdisc_reset(pPriv->pChild);
427 sch->q.qlen = 0;
428 sch->qstats.backlog = 0;
429}
430
431static void vboxNetFltQdiscDestroy(struct Qdisc* sch)
432{
433 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
434 struct net_device *pDev = qdisc_dev(sch);
435
436 qdisc_destroy(pPriv->pChild);
437 pPriv->pChild = NULL;
438
439 if (pPriv->ppSaved)
440 {
441 int i;
442 for (i = 0; i < QDISC_SAVED_NUM(pDev); i++)
443 if (pPriv->ppSaved[i])
444 qdisc_destroy(pPriv->ppSaved[i]);
445 kfree(pPriv->ppSaved);
446 pPriv->ppSaved = NULL;
447 }
448}
449
450static int vboxNetFltClassGraft(struct Qdisc *sch, unsigned long arg, struct Qdisc *pNew,
451 struct Qdisc **ppOld)
452{
453 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
454
455 if (pNew == NULL)
456 pNew = &noop_qdisc;
457
458 sch_tree_lock(sch);
459 *ppOld = pPriv->pChild;
460 pPriv->pChild = pNew;
461# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
462 sch->q.qlen = 0;
463# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
464 qdisc_tree_decrease_qlen(*ppOld, (*ppOld)->q.qlen);
465# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
466 qdisc_reset(*ppOld);
467 sch_tree_unlock(sch);
468
469 return 0;
470}
471
472static struct Qdisc *vboxNetFltClassLeaf(struct Qdisc *sch, unsigned long arg)
473{
474 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
475 return pPriv->pChild;
476}
477
478static unsigned long vboxNetFltClassGet(struct Qdisc *sch, u32 classid)
479{
480 return 1;
481}
482
483static void vboxNetFltClassPut(struct Qdisc *sch, unsigned long arg)
484{
485}
486
487# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
488static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
489 struct rtattr **tca, unsigned long *arg)
490# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
491static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
492 struct nlattr **tca, unsigned long *arg)
493# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
494{
495 return -ENOSYS;
496}
497
498static int vboxNetFltClassDelete(struct Qdisc *sch, unsigned long arg)
499{
500 return -ENOSYS;
501}
502
503static void vboxNetFltClassWalk(struct Qdisc *sch, struct qdisc_walker *walker)
504{
505 if (!walker->stop) {
506 if (walker->count >= walker->skip)
507 if (walker->fn(sch, 1, walker) < 0) {
508 walker->stop = 1;
509 return;
510 }
511 walker->count++;
512 }
513}
514
515# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
516static struct tcf_proto **vboxNetFltClassFindTcf(struct Qdisc *sch, unsigned long cl)
517{
518 return NULL;
519}
520# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) */
521
522static int vboxNetFltClassDump(struct Qdisc *sch, unsigned long cl,
523 struct sk_buff *skb, struct tcmsg *tcm)
524{
525 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
526
527 if (cl != 1)
528 return -ENOENT;
529
530 tcm->tcm_handle |= TC_H_MIN(1);
531 tcm->tcm_info = pPriv->pChild->handle;
532
533 return 0;
534}
535
536
537static struct Qdisc_class_ops g_VBoxNetFltClassOps =
538{
539 .graft = vboxNetFltClassGraft,
540 .leaf = vboxNetFltClassLeaf,
541 .get = vboxNetFltClassGet,
542 .put = vboxNetFltClassPut,
543 .change = vboxNetFltClassChange,
544 .delete = vboxNetFltClassDelete,
545 .walk = vboxNetFltClassWalk,
546# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
547 .tcf_chain = vboxNetFltClassFindTcf,
548# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) */
549 .dump = vboxNetFltClassDump,
550};
551
552
553static struct Qdisc_ops g_VBoxNetFltQDiscOps = {
554 .cl_ops = &g_VBoxNetFltClassOps,
555 .id = "vboxnetflt",
556 .priv_size = sizeof(struct VBoxNetQDiscPriv),
557 .enqueue = vboxNetFltQdiscEnqueue,
558 .dequeue = vboxNetFltQdiscDequeue,
559# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
560 .requeue = vboxNetFltQdiscRequeue,
561# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
562 .peek = qdisc_peek_dequeued,
563# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
564 .drop = vboxNetFltQdiscDrop,
565 .init = vboxNetFltQdiscInit,
566 .reset = vboxNetFltQdiscReset,
567 .destroy = vboxNetFltQdiscDestroy,
568 .owner = THIS_MODULE
569};
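/*
 * Once VBoxNetFltLinuxInit below has passed the ops above to register_qdisc(),
 * the discipline can in principle also be attached by hand, presumably with
 * something along the lines of
 *      tc qdisc add dev eth0 root vboxnetflt
 * (illustrative command only; the normal path is vboxNetFltLinuxQdiscInstall
 * below, which also handles this manually installed case).
 */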
570
571/*
572 * If our qdisc is already attached to the device (that means the user
573 * installed it from the command line with the 'tc' command) we simply update
574 * the pointer to the vboxnetflt instance in the qdisc's private structure.
575 * Otherwise we need to take some additional steps:
576 * - Create our qdisc;
577 * - Save all references to qdiscs;
578 * - Replace our child with the first qdisc reference;
579 * - Replace all references so they point to our qdisc.
580 */
581static void vboxNetFltLinuxQdiscInstall(PVBOXNETFLTINS pThis, struct net_device *pDev)
582{
583# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
584 int i;
585# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
586 PVBOXNETQDISCPRIV pPriv;
587
588 struct Qdisc *pExisting = QDISC_GET(pDev);
589 /* Do not install our qdisc for devices with no TX queues */
590 if (!pExisting->enqueue)
591 return;
592 if (strcmp(pExisting->ops->id, "vboxnetflt"))
593 {
594 /* The existing qdisc is different from ours, let's create a new one. */
595 struct Qdisc *pNew = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
596 &g_VBoxNetFltQDiscOps, TC_H_ROOT);
597 if (!pNew)
598 return; // TODO: Error?
599
600 if (!try_module_get(THIS_MODULE))
601 {
602 /*
603 * This may cause a memory leak but calling qdisc_destroy()
604 * is not an option as it will call module_put().
605 */
606 return;
607 }
608 pPriv = qdisc_priv(pNew);
609
610 qdisc_destroy(pPriv->pChild);
611 pPriv->pChild = QDISC_GET(pDev);
612 atomic_inc(&pPriv->pChild->refcnt);
613 /*
614 * There is no need to deactivate the device or acquire any locks prior to
615 * changing qdiscs since we do not destroy the old qdisc.
616 * Atomic replacement of pointers is enough.
617 */
618 /*
619 * No need to change reference counters here as we merely move
620 * the pointer and the reference counter of the newly allocated
621 * qdisc is already 1.
622 */
623# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
624 pPriv->ppSaved[0] = pDev->qdisc_sleeping;
625 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pNew);
626 ASMAtomicWritePtr(&pDev->qdisc, pNew);
627# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
628 for (i = 0; i < pDev->num_tx_queues; i++)
629 {
630 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
631
632 pPriv->ppSaved[i] = pQueue->qdisc_sleeping;
633 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pNew);
634 ASMAtomicWritePtr(&pQueue->qdisc, pNew);
635 if (i)
636 atomic_inc(&pNew->refcnt);
637 }
638 /* Newer kernels store root qdisc in netdev structure as well. */
639# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
640 pPriv->ppSaved[pDev->num_tx_queues] = pDev->qdisc;
641 ASMAtomicWritePtr(&pDev->qdisc, pNew);
642 atomic_inc(&pNew->refcnt);
643# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
644# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
645 /* Sync the queue len with our child */
646 pNew->q.qlen = pPriv->pChild->q.qlen;
647 }
648 else
649 {
650 /* We already have vboxnetflt qdisc, let's use it. */
651 pPriv = qdisc_priv(pExisting);
652 }
653 ASMAtomicWritePtr(&pPriv->pVBoxNetFlt, pThis);
654 QDISC_LOG(("vboxNetFltLinuxInstallQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
655}
656
657static void vboxNetFltLinuxQdiscRemove(PVBOXNETFLTINS pThis, struct net_device *pDev)
658{
659# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
660 int i;
661# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
662 PVBOXNETQDISCPRIV pPriv;
663 struct Qdisc *pQdisc, *pChild;
664 if (!pDev)
665 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
666 if (!VALID_PTR(pDev))
667 {
668 printk("VBoxNetFlt: Failed to detach qdisc, invalid device pointer: %p\n",
669 pDev);
670 return; // TODO: Consider returning an error
671 }
672
673
674 pQdisc = QDISC_GET(pDev);
675 if (strcmp(pQdisc->ops->id, "vboxnetflt"))
676 {
677 if (pQdisc->enqueue)
678 {
679 /* Looks like the user has replaced our qdisc manually. */
680 printk("VBoxNetFlt: Failed to detach qdisc, wrong qdisc: %s\n",
681 pQdisc->ops->id);
682 }
683 return; // TODO: Consider returning an error
684 }
685
686 pPriv = qdisc_priv(pQdisc);
687 Assert(pPriv->pVBoxNetFlt == pThis);
688 ASMAtomicWriteNullPtr(&pPriv->pVBoxNetFlt);
689 pChild = ASMAtomicXchgPtrT(&pPriv->pChild, &noop_qdisc, struct Qdisc *);
690 qdisc_destroy(pChild); /* It won't be the last reference. */
691
692 QDISC_LOG(("vboxNetFltLinuxQdiscRemove: refcnt=%d num_tx_queues=%d\n",
693 atomic_read(&pQdisc->refcnt), pDev->num_tx_queues));
694# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
695 /* Play it safe, make sure the qdisc is not being used. */
696 if (pPriv->ppSaved[0])
697 {
698 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pPriv->ppSaved[0]);
699 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[0]);
700 pPriv->ppSaved[0] = NULL;
701 while (QDISC_IS_BUSY(pDev, pQdisc))
702 yield();
703 qdisc_destroy(pQdisc); /* Destroy reference */
704 }
705# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
706 for (i = 0; i < pDev->num_tx_queues; i++)
707 {
708 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
709 if (pPriv->ppSaved[i])
710 {
711 Assert(pQueue->qdisc_sleeping == pQdisc);
712 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pPriv->ppSaved[i]);
713 ASMAtomicWritePtr(&pQueue->qdisc, pPriv->ppSaved[i]);
714 pPriv->ppSaved[i] = NULL;
715 while (QDISC_IS_BUSY(pDev, pQdisc))
716 yield();
717 qdisc_destroy(pQdisc); /* Destroy reference */
718 }
719 }
720 /* Newer kernels store root qdisc in netdev structure as well. */
721# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
722 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[pDev->num_tx_queues]);
723 pPriv->ppSaved[pDev->num_tx_queues] = NULL;
724 while (QDISC_IS_BUSY(pDev, pQdisc))
725 yield();
726 qdisc_destroy(pQdisc); /* Destroy reference */
727# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
728# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
729
730 /*
731 * At this point all references to our qdisc should be gone
732 * unless the user had installed it manually.
733 */
734 QDISC_LOG(("vboxNetFltLinuxRemoveQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
735}
736
737#endif /* VBOXNETFLT_WITH_QDISC */
738
739
740/**
741 * Initialize module.
742 *
743 * @returns appropriate status code.
744 */
745static int __init VBoxNetFltLinuxInit(void)
746{
747 int rc;
748 /*
749 * Initialize IPRT.
750 */
751 rc = RTR0Init(0);
752 if (RT_SUCCESS(rc))
753 {
754 Log(("VBoxNetFltLinuxInit\n"));
755
756 /*
757 * Initialize the globals and connect to the support driver.
758 *
759 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
760 * for establishing the connection to the support driver.
761 */
762 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
763 rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
764 if (RT_SUCCESS(rc))
765 {
766#ifdef VBOXNETFLT_WITH_QDISC
767 /*memcpy(&g_VBoxNetFltQDiscOps, &pfifo_qdisc_ops, sizeof(g_VBoxNetFltQDiscOps));
768 strcpy(g_VBoxNetFltQDiscOps.id, "vboxnetflt");
769 g_VBoxNetFltQDiscOps.owner = THIS_MODULE;*/
770 rc = register_qdisc(&g_VBoxNetFltQDiscOps);
771 if (rc)
772 {
773 LogRel(("VBoxNetFlt: Failed to register qdisc: %d\n", rc));
774 return rc;
775 }
776#endif /* VBOXNETFLT_WITH_QDISC */
777 LogRel(("VBoxNetFlt: Successfully started.\n"));
778 return 0;
779 }
780
781 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
782 RTR0Term();
783 }
784 else
785 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
786
787 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
788 return -RTErrConvertToErrno(rc);
789}
790
791
792/**
793 * Unload the module.
794 *
795 * @todo We have to prevent this if we're busy!
796 */
797static void __exit VBoxNetFltLinuxUnload(void)
798{
799 int rc;
800 Log(("VBoxNetFltLinuxUnload\n"));
801 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
802
803#ifdef VBOXNETFLT_WITH_QDISC
804 unregister_qdisc(&g_VBoxNetFltQDiscOps);
805#endif /* VBOXNETFLT_WITH_QDISC */
806 /*
807 * Undo the work done during start (in reverse order).
808 */
809 rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
810 AssertRC(rc); NOREF(rc);
811
812 RTR0Term();
813
814 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
815
816 Log(("VBoxNetFltLinuxUnload - done\n"));
817}
818
819
820/**
821 * Experiment where we filter traffic from the host to the internal network
822 * before it reaches the NIC driver.
823 *
824 * The current code uses a very ugly hack and only works on kernels using the
825 * net_device_ops (>= 2.6.29). It has been shown to give us a
826 * performance boost of 60-100% though. So, we have to find some less hacky way
827 * of getting this job done eventually.
828 *
829 * #define VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
830 */
831#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
832
833# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
834
835# include <linux/ethtool.h>
836
837typedef struct ethtool_ops OVR_OPSTYPE;
838# define OVR_OPS ethtool_ops
839# define OVR_XMIT pfnStartXmit
840
841# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
842
843typedef struct net_device_ops OVR_OPSTYPE;
844# define OVR_OPS netdev_ops
845# define OVR_XMIT pOrgOps->ndo_start_xmit
846
847# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
848
849/**
850 * The overridden net_device_ops of the device we're attached to.
851 *
852 * As there is no net_device_ops structure in pre-2.6.29 kernels we override
853 * ethtool_ops instead along with hard_start_xmit callback in net_device
854 * structure.
855 *
856 * This is a very dirty hack that was created to explore how much we can improve
857 * the host-to-guest transfers by not CC'ing the NIC. It turns out to be
858 * the only way to filter outgoing packets for devices without a TX queue.
859 */
860typedef struct VBoxNetDeviceOpsOverride
861{
862 /** Our overridden ops. */
863 OVR_OPSTYPE Ops;
864 /** Magic word. */
865 uint32_t u32Magic;
866 /** Pointer to the original ops. */
867 OVR_OPSTYPE const *pOrgOps;
868# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
869 /** Pointer to the original hard_start_xmit function. */
870 int (*pfnStartXmit)(struct sk_buff *pSkb, struct net_device *pDev);
871# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
872 /** Pointer to the net filter instance. */
873 PVBOXNETFLTINS pVBoxNetFlt;
874 /** The number of filtered packets. */
875 uint64_t cFiltered;
876 /** The total number of packets. */
877 uint64_t cTotal;
878} VBOXNETDEVICEOPSOVERRIDE, *PVBOXNETDEVICEOPSOVERRIDE;
879/** VBOXNETDEVICEOPSOVERRIDE::u32Magic value. */
880#define VBOXNETDEVICEOPSOVERRIDE_MAGIC UINT32_C(0x00c0ffee)
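/*
 * Illustrative sketch (not part of the original file): since the Ops member
 * is the first field of VBOXNETDEVICEOPSOVERRIDE, a hooked device can be
 * recognized by casting its ops pointer back and checking the magic, which
 * is exactly what the xmit filter and unhook code below do. The helper name
 * is hypothetical.
 */
#if 0
static bool vboxNetFltExampleIsHooked(struct net_device *pDev)
{
    PVBOXNETDEVICEOPSOVERRIDE pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->OVR_OPS;
    return VALID_PTR(pOverride)
        && pOverride->u32Magic == VBOXNETDEVICEOPSOVERRIDE_MAGIC;
}
#endif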
881
882/**
883 * ndo_start_xmit wrapper that drops packets that shouldn't go to the wire
884 * because they belong on the internal network.
885 *
886 * @returns NETDEV_TX_XXX.
887 * @param pSkb The socket buffer to transmit.
888 * @param pDev The net device.
889 */
890static int vboxNetFltLinuxStartXmitFilter(struct sk_buff *pSkb, struct net_device *pDev)
891{
892 PVBOXNETDEVICEOPSOVERRIDE pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->OVR_OPS;
893 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
894 PCRTNETETHERHDR pEtherHdr;
895 PINTNETTRUNKSWPORT pSwitchPort;
896 uint32_t cbHdrs;
897
898
899 /*
900 * Validate the override structure.
901 *
902 * Note! We're racing vboxNetFltLinuxUnhookDev here. If this was supposed
903 * to be production quality code, we would have to be much more
904 * careful here and avoid the race.
905 */
906 if ( !VALID_PTR(pOverride)
907 || pOverride->u32Magic != VBOXNETDEVICEOPSOVERRIDE_MAGIC
908# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) || defined(VBOXNETFLT_WITH_GRO)
909 || !VALID_PTR(pOverride->pOrgOps)
910# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) || defined(VBOXNETFLT_WITH_GRO) */
911 )
912 {
913 printk("vboxNetFltLinuxStartXmitFilter: bad override %p\n", pOverride);
914 dev_kfree_skb(pSkb);
915 return NETDEV_TX_OK;
916 }
917 pOverride->cTotal++;
918
919 /*
920 * Do the filtering based on the default OUI of our virtual NICs.
921 *
922 * Note! In a real solution, we would ask the switch whether the
923 * destination MAC is 100% certain to be on the internal network and
924 * then drop it.
925 */
926 cbHdrs = skb_headlen(pSkb);
927 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
928 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
929 if ( pEtherHdr
930 && VALID_PTR(pOverride->pVBoxNetFlt)
931 && (pSwitchPort = pOverride->pVBoxNetFlt->pSwitchPort) != NULL
932 && VALID_PTR(pSwitchPort)
933 && cbHdrs >= 6)
934 {
935 INTNETSWDECISION enmDecision;
936
937 /** @todo consider reference counting, etc. */
938 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
939 if (enmDecision == INTNETSWDECISION_INTNET)
940 {
941 dev_kfree_skb(pSkb);
942 pOverride->cFiltered++;
943 return NETDEV_TX_OK;
944 }
945 }
946
947 return pOverride->OVR_XMIT(pSkb, pDev);
948}
949
950/**
951 * Hooks the device ndo_start_xmit operation of the device.
952 *
953 * @param pThis The net filter instance.
954 * @param pDev The net device.
955 */
956static void vboxNetFltLinuxHookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
957{
958 PVBOXNETDEVICEOPSOVERRIDE pOverride;
959 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
960
961 pOverride = RTMemAlloc(sizeof(*pOverride));
962 if (!pOverride)
963 return;
964 pOverride->pOrgOps = pDev->OVR_OPS;
965# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) && !defined(VBOXNETFLT_WITH_GRO)
966 /*
967 * There is no need to save the ethtool_ops structure since we only modify
968 * the pointer itself and the structure is optional (#5712).
969 */
970 pOverride->pfnStartXmit = pDev->hard_start_xmit;
971# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) || defined(VBOXNETFLT_WITH_GRO) */
972 pOverride->Ops = *pDev->OVR_OPS;
973 pOverride->Ops.ndo_start_xmit = vboxNetFltLinuxStartXmitFilter;
974# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
975 pOverride->u32Magic = VBOXNETDEVICEOPSOVERRIDE_MAGIC;
976 pOverride->cTotal = 0;
977 pOverride->cFiltered = 0;
978 pOverride->pVBoxNetFlt = pThis;
979
980 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp); /* (this isn't necessary, but so what) */
981 ASMAtomicWritePtr((void * volatile *)&pDev->OVR_OPS, pOverride);
982# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
983 ASMAtomicXchgPtr((void * volatile *)&pDev->hard_start_xmit, vboxNetFltLinuxStartXmitFilter);
984# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
985 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
986}
987
988/**
989 * Undoes what vboxNetFltLinuxHookDev did.
990 *
991 * @param pThis The net filter instance.
992 * @param pDev The net device. Can be NULL, in which case
993 * we'll try retrieve it from @a pThis.
994 */
995static void vboxNetFltLinuxUnhookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
996{
997 PVBOXNETDEVICEOPSOVERRIDE pOverride;
998 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
999
1000 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1001 if (!pDev)
1002 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1003 if (VALID_PTR(pDev))
1004 {
1005 pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->OVR_OPS;
1006 if ( VALID_PTR(pOverride)
1007 && pOverride->u32Magic == VBOXNETDEVICEOPSOVERRIDE_MAGIC
1008 && VALID_PTR(pOverride->pOrgOps)
1009 )
1010 {
1011# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
1012 ASMAtomicWritePtr((void * volatile *)&pDev->hard_start_xmit, pOverride->pfnStartXmit);
1013# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
1014 ASMAtomicWritePtr((void const * volatile *)&pDev->OVR_OPS, pOverride->pOrgOps);
1015 ASMAtomicWriteU32(&pOverride->u32Magic, 0);
1016 }
1017 else
1018 pOverride = NULL;
1019 }
1020 else
1021 pOverride = NULL;
1022 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1023
1024 if (pOverride)
1025 {
1026 printk("vboxnetflt: dropped %llu out of %llu packets\n", pOverride->cFiltered, pOverride->cTotal);
1027 RTMemFree(pOverride);
1028 }
1029}
1030
1031#endif /* VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT */
1032
1033
1034/**
1035 * Reads and retains the host interface handle.
1036 *
1037 * @returns The handle, NULL if detached.
1038 * @param pThis The instance.
1039 */
1040DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
1041{
1042#if 0
1043 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1044 struct net_device *pDev = NULL;
1045
1046 Log(("vboxNetFltLinuxRetainNetDev\n"));
1047 /*
1048 * Be careful here to avoid problems racing the detached callback.
1049 */
1050 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1051 if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
1052 {
1053 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
1054 if (pDev)
1055 {
1056 dev_hold(pDev);
1057 Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n",
1058 pDev, pDev->name,
1059#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
1060 netdev_refcnt_read(pDev)
1061#else
1062 atomic_read(&pDev->refcnt)
1063#endif
1064 ));
1065 }
1066 }
1067 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1068
1069 Log(("vboxNetFltLinuxRetainNetDev - done\n"));
1070 return pDev;
1071#else
1072 return ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1073#endif
1074}
1075
1076
1077/**
1078 * Release the host interface handle previously retained
1079 * by vboxNetFltLinuxRetainNetDev.
1080 *
1081 * @param pThis The instance.
1082 * @param pDev The vboxNetFltLinuxRetainNetDev
1083 * return value, NULL is fine.
1084 */
1085DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
1086{
1087#if 0
1088 Log(("vboxNetFltLinuxReleaseNetDev\n"));
1089 NOREF(pThis);
1090 if (pDev)
1091 {
1092 dev_put(pDev);
1093 Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n",
1094 pDev, pDev->name,
1095#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
1096 netdev_refcnt_read(pDev)
1097#else
1098 atomic_read(&pDev->refcnt)
1099#endif
1100 ));
1101 }
1102 Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
1103#endif
1104}
1105
1106#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
1107#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))
1108
1109/**
1110 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
1111 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
1112 *
1113 * @returns true / false accordingly.
1114 * @param pBuf The sk_buff.
1115 */
1116DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
1117{
1118 return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
1119}
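/*
 * Illustrative sketch (not part of the original file) of the tag round trip:
 * the tag is written into the last four bytes of skb->cb when we inject a
 * buffer into the host stack (see vboxNetFltLinuxSkBufFromSG below) and is
 * tested by the function above when the same buffer re-enters our packet
 * handler. The helper name is hypothetical.
 */
#if 0
static void vboxNetFltExampleTagRoundTrip(struct sk_buff *pBuf)
{
    VBOXNETFLT_SKB_TAG(pBuf) = VBOXNETFLT_CB_TAG(pBuf); /* done on injection */
    Assert(vboxNetFltLinuxSkBufIsOur(pBuf));            /* true on re-entry */
}
#endif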
1120
1121
1122/**
1123 * Internal worker that creates a Linux sk_buff for a
1124 * (scatter/)gather list.
1125 *
1126 * @returns Pointer to the sk_buff.
1127 * @param pThis The instance.
1128 * @param pSG The (scatter/)gather list.
1129 * @param fDstWire Set if the destination is the wire.
1130 */
1131static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
1132{
1133 struct sk_buff *pPkt;
1134 struct net_device *pDev;
1135 unsigned fGsoType = 0;
1136
1137 if (pSG->cbTotal == 0)
1138 {
1139 LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
1140 return NULL;
1141 }
1142
1143 /** @todo We should use fragments mapping the SG buffers for large packets.
1144 * 256 bytes seems to be a threshold used a lot for this. It
1145 * requires some nasty work on the intnet side though... */
1146 /*
1147 * Allocate a packet and copy over the data.
1148 */
1149 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1150 pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
1151 if (RT_UNLIKELY(!pPkt))
1152 {
1153 Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
1154 pSG->pvUserData = NULL;
1155 return NULL;
1156 }
1157 pPkt->dev = pDev;
1158 pPkt->ip_summed = CHECKSUM_NONE;
1159
1160 /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
1161 skb_reserve(pPkt, NET_IP_ALIGN);
1162
1163 /* Copy the segments. */
1164 skb_put(pPkt, pSG->cbTotal);
1165 IntNetSgRead(pSG, pPkt->data);
1166
1167#if defined(VBOXNETFLT_WITH_GSO_XMIT_WIRE) || defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
1168 /*
1169 * Setup GSO if used by this packet.
1170 */
1171 switch ((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type)
1172 {
1173 default:
1174 AssertMsgFailed(("%u (%s)\n", pSG->GsoCtx.u8Type, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type) ));
1175 /* fall thru */
1176 case PDMNETWORKGSOTYPE_INVALID:
1177 fGsoType = 0;
1178 break;
1179 case PDMNETWORKGSOTYPE_IPV4_TCP:
1180 fGsoType = SKB_GSO_TCPV4;
1181 break;
1182 case PDMNETWORKGSOTYPE_IPV4_UDP:
1183 fGsoType = SKB_GSO_UDP;
1184 break;
1185 case PDMNETWORKGSOTYPE_IPV6_TCP:
1186 fGsoType = SKB_GSO_TCPV6;
1187 break;
1188 }
1189 if (fGsoType)
1190 {
1191 struct skb_shared_info *pShInfo = skb_shinfo(pPkt);
1192
1193 pShInfo->gso_type = fGsoType | SKB_GSO_DODGY;
1194 pShInfo->gso_size = pSG->GsoCtx.cbMaxSeg;
1195 pShInfo->gso_segs = PDMNetGsoCalcSegmentCount(&pSG->GsoCtx, pSG->cbTotal);
1196
1197 /*
1198 * We need to set checksum fields even if the packet goes to the host
1199 * directly as it may be immediately forwarded by IP layer @bugref{5020}.
1200 */
1201 Assert(skb_headlen(pPkt) >= pSG->GsoCtx.cbHdrs);
1202 pPkt->ip_summed = CHECKSUM_PARTIAL;
1203# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1204 pPkt->csum_start = skb_headroom(pPkt) + pSG->GsoCtx.offHdr2;
1205 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1206 pPkt->csum_offset = RT_OFFSETOF(RTNETTCP, th_sum);
1207 else
1208 pPkt->csum_offset = RT_OFFSETOF(RTNETUDP, uh_sum);
1209# else
1210 pPkt->h.raw = pPkt->data + pSG->GsoCtx.offHdr2;
1211 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1212 pPkt->csum = RT_OFFSETOF(RTNETTCP, th_sum);
1213 else
1214 pPkt->csum = RT_OFFSETOF(RTNETUDP, uh_sum);
1215# endif
1216 if (!fDstWire)
1217 PDMNetGsoPrepForDirectUse(&pSG->GsoCtx, pPkt->data, pSG->cbTotal, PDMNETCSUMTYPE_PSEUDO);
1218 }
1219#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE || VBOXNETFLT_WITH_GSO_XMIT_HOST */
1220
1221 /*
1222 * Finish up the socket buffer.
1223 */
1224 pPkt->protocol = eth_type_trans(pPkt, pDev);
1225 if (fDstWire)
1226 {
1227 VBOX_SKB_RESET_NETWORK_HDR(pPkt);
1228
1229 /* Restore ethernet header back. */
1230 skb_push(pPkt, ETH_HLEN); /** @todo VLAN: +4 if VLAN? */
1231 VBOX_SKB_RESET_MAC_HDR(pPkt);
1232 }
1233 VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
1234
1235 return pPkt;
1236}
1237
1238
1239/**
1240 * Initializes an SG list from an sk_buff.
1241 *
1242 * @param pThis The instance.
1243 * @param pBuf The sk_buff.
1244 * @param pSG The SG.
1245 * @param cSegs The number of segments allocated for the SG.
1246 * This should match the value returned by
1247 * vboxNetFltLinuxCalcSGSegments() exactly!
1248 * @param fSrc The source of the frame.
1249 * @param pGsoCtx Pointer to the GSO context if it's a GSO
1250 * internal network frame. NULL if it is a
1251 * regular frame.
1252 */
1253DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG,
1254 unsigned cSegs, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1255{
1256 int i;
1257 NOREF(pThis);
1258
1259 Assert(!skb_shinfo(pBuf)->frag_list);
1260
1261 if (!pGsoCtx)
1262 IntNetSgInitTempSegs(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/);
1263 else
1264 IntNetSgInitTempSegsGso(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/, pGsoCtx);
1265
1266#ifdef VBOXNETFLT_SG_SUPPORT
1267 pSG->aSegs[0].cb = skb_headlen(pBuf);
1268 pSG->aSegs[0].pv = pBuf->data;
1269 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1270
1271 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
1272 {
1273 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
1274 pSG->aSegs[i+1].cb = pFrag->size;
1275 pSG->aSegs[i+1].pv = kmap(pFrag->page);
1276 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
1277 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
1278 }
1279 ++i;
1280
1281#else
1282 pSG->aSegs[0].cb = pBuf->len;
1283 pSG->aSegs[0].pv = pBuf->data;
1284 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1285 i = 1;
1286#endif
1287
1288 pSG->cSegsUsed = i;
1289
1290#ifdef PADD_RUNT_FRAMES_FROM_HOST
1291 /*
1292 * Add a trailer if the frame is too small.
1293 *
1294 * Since we're getting to the packet before it is framed, it has not
1295 * yet been padded. The current solution is to add a segment pointing
1296 * to a buffer containing all zeros and pray that works for all frames...
1297 */
1298 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
1299 {
1300 static uint8_t const s_abZero[128] = {0};
1301
1302 AssertReturnVoid(i < cSegs);
1303
1304 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
1305 pSG->aSegs[i].pv = (void *)&s_abZero[0];
1306 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
1307 pSG->cbTotal = 60;
1308 pSG->cSegsUsed++;
1309 Assert(i + 1 <= pSG->cSegsAlloc);
1310 }
1311#endif
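    /*
     * Worked example (illustrative): a 42 byte ARP request from the host
     * (14 byte ethernet header + 28 byte ARP payload) gets an 18 byte zero
     * trailer segment here, bringing cbTotal up to the 60 byte minimum frame
     * size (the 4 byte FCS is added by the NIC).
     */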
1312
1313 Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
1314 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
1315 for (i = 0; i < pSG->cSegsUsed; i++)
1316 Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
1317 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
1318}
1319
1320/**
1321 * Packet handler: called by the kernel for each packet on the device we
1322 * have attached our packet type to.
1323 *
1324 * @returns 0 (the return value is ignored by the kernel).
1325 * @param pBuf The socket buffer.
1326 * @param pSkbDev The device the buffer belongs to.
1327 * @param pPacketType Our packet type structure (member of the instance).
1328 * @param pOrigDev The original device (kernels >= 2.6.14 only).
1329 */
1330#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
1331static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1332 struct net_device *pSkbDev,
1333 struct packet_type *pPacketType,
1334 struct net_device *pOrigDev)
1335#else
1336static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1337 struct net_device *pSkbDev,
1338 struct packet_type *pPacketType)
1339#endif
1340{
1341 PVBOXNETFLTINS pThis;
1342 struct net_device *pDev;
1343 LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
1344 pBuf, pSkbDev, pPacketType));
1345#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1346 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1347 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1348# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1349 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1350# endif
1351#else
1352 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1353 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1354#endif
1355 /*
1356 * Drop it immediately?
1357 */
1358 if (!pBuf)
1359 return 0;
1360
1361 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
1362 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1363 if (pDev != pSkbDev)
1364 {
1365 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
1366 return 0;
1367 }
1368
1369 Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
1370 if (vboxNetFltLinuxSkBufIsOur(pBuf))
1371 {
1372 Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
1373 dev_kfree_skb(pBuf);
1374 return 0;
1375 }
1376
1377#ifndef VBOXNETFLT_SG_SUPPORT
1378 {
1379 /*
1380 * Get rid of fragmented packets, they cause too much trouble.
1381 */
1382 struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
1383 kfree_skb(pBuf);
1384 if (!pCopy)
1385 {
1386 LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
1387 return 0;
1388 }
1389 pBuf = pCopy;
1390# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1391 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1392 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1393# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1394 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1395# endif
1396# else
1397 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1398 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1399# endif
1400 }
1401#endif
1402
1403#ifdef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1404 /* Forward it to the internal network. */
1405 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1406#else
1407 /* Add the packet to transmit queue and schedule the bottom half. */
1408 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
1409 schedule_work(&pThis->u.s.XmitTask);
1410 Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
1411 &pThis->u.s.XmitTask, pBuf));
1412#endif
1413
1414 /* It does not really matter what we return, it is ignored by the kernel. */
1415 return 0;
1416}
1417
1418/**
1419 * Calculate the number of INTNETSEG segments the socket buffer will need.
1420 *
1421 * @returns Segment count.
1422 * @param pBuf The socket buffer.
1423 */
1424DECLINLINE(unsigned) vboxNetFltLinuxCalcSGSegments(struct sk_buff *pBuf)
1425{
1426#ifdef VBOXNETFLT_SG_SUPPORT
1427 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
1428#else
1429 unsigned cSegs = 1;
1430#endif
1431#ifdef PADD_RUNT_FRAMES_FROM_HOST
1432 /* vboxNetFltLinuxSkBufToSG adds a padding segment if it's a runt. */
1433 if (pBuf->len < 60)
1434 cSegs++;
1435#endif
1436 return cSegs;
1437}
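/*
 * Worked example (illustrative): with VBOXNETFLT_SG_SUPPORT a buffer with
 * nr_frags = 3 yields cSegs = 4 (head + fragments), plus one more segment if
 * the frame is shorter than 60 bytes; without it, every buffer is one linear
 * segment plus the optional runt padding segment.
 */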
1438
1439/**
1440 * Destroy the intnet scatter / gather buffer created by
1441 * vboxNetFltLinuxSkBufToSG.
1442 */
1443static void vboxNetFltLinuxDestroySG(PINTNETSG pSG)
1444{
1445#ifdef VBOXNETFLT_SG_SUPPORT
1446 int i;
1447 /* Segment 0 is the linear data; segments 1 and up were kmap()'ed by vboxNetFltLinuxSkBufToSG. */
1448 for (i = 1; i < pSG->cSegsUsed; i++)
1449 {
1450 printk("kunmap(%p)\n", pSG->aSegs[i].pv);
1451 kunmap(pSG->aSegs[i].pv);
1452 }
1453#endif
1454 NOREF(pSG);
1455}
1456
1457#ifdef LOG_ENABLED
1458/**
1459 * Logging helper.
1460 */
1461static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
1462{
1463 uint8_t *pInt, *pExt;
1464 static int iPacketNo = 1;
1465 iPacketNo += iIncrement;
1466 if (fEgress)
1467 {
1468 pExt = pSG->aSegs[0].pv;
1469 pInt = pExt + 6;
1470 }
1471 else
1472 {
1473 pInt = pSG->aSegs[0].pv;
1474 pExt = pInt + 6;
1475 }
1476 Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
1477 " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
1478 pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
1479 fEgress ? "-->" : "<--", pszWhere,
1480 pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
1481 pSG->cbTotal, iPacketNo));
1482 Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
1483}
1484#else
1485# define vboxNetFltDumpPacket(a, b, c, d) do {} while (0)
1486#endif
1487
1488#ifdef VBOXNETFLT_WITH_GSO_RECV
1489
1490/**
1491 * Worker for vboxNetFltLinuxForwardToIntNet that checks if we can forward a
1492 * GSO socket buffer without having to segment it.
1493 *
1494 * @returns true on success, false if the buffer needs segmenting.
1495 * @param pThis The net filter instance.
1496 * @param pSkb The GSO socket buffer.
1497 * @param fSrc The source.
1498 * @param pGsoCtx Where to return the GSO context on success.
1499 */
1500static bool vboxNetFltLinuxCanForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc,
1501 PPDMNETWORKGSO pGsoCtx)
1502{
1503 PDMNETWORKGSOTYPE enmGsoType;
1504 uint16_t uEtherType;
1505 unsigned int cbTransport;
1506 unsigned int offTransport;
1507 unsigned int cbTransportHdr;
1508 unsigned uProtocol;
1509 union
1510 {
1511 RTNETIPV4 IPv4;
1512 RTNETIPV6 IPv6;
1513 RTNETTCP Tcp;
1514 uint8_t ab[40];
1515 uint16_t au16[40/2];
1516 uint32_t au32[40/4];
1517 } Buf;
1518
1519 /*
1520 * Check the GSO properties of the socket buffer and make sure it fits.
1521 */
1522 /** @todo Figure out how to handle SKB_GSO_TCP_ECN! */
1523 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCPV6 | SKB_GSO_TCPV4) ))
1524 {
1525 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_type=%#x\n", skb_shinfo(pSkb)->gso_type));
1526 return false;
1527 }
1528 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_size < 1
1529 || pSkb->len > VBOX_MAX_GSO_SIZE ))
1530 {
1531 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_size=%#x skb_len=%#x (max=%#x)\n", skb_shinfo(pSkb)->gso_size, pSkb->len, VBOX_MAX_GSO_SIZE));
1532 return false;
1533 }
1534 /*
1535 * It is possible to receive GSO packets from the wire if GRO is enabled.
1536 */
1537 if (RT_UNLIKELY(fSrc & INTNETTRUNKDIR_WIRE))
1538 {
1539 Log5(("vboxNetFltLinuxCanForwardAsGso: fSrc=wire\n"));
1540#ifdef VBOXNETFLT_WITH_GRO
1541 /*
1542 * The packet came from the wire and the driver has already consumed the
1543 * mac header. We need to restore it.
1544 */
1545 pSkb->mac_len = skb_network_header(pSkb) - skb_mac_header(pSkb);
1546 skb_push(pSkb, pSkb->mac_len);
1547 Log5(("vboxNetFltLinuxCanForwardAsGso: mac_len=%d data=%p mac_header=%p network_header=%p\n",
1548 pSkb->mac_len, pSkb->data, skb_mac_header(pSkb), skb_network_header(pSkb)));
1549#else /* !VBOXNETFLT_WITH_GRO */
1550 /* Older kernels didn't have GRO. */
1551 return false;
1552#endif /* !VBOXNETFLT_WITH_GRO */
1553 }
1554 else
1555 {
1556 /*
1557 * skb_gso_segment does the following. Do we need to do it as well?
1558 */
1559#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1560 skb_reset_mac_header(pSkb);
1561 pSkb->mac_len = pSkb->network_header - pSkb->mac_header;
1562#else
1563 pSkb->mac.raw = pSkb->data;
1564 pSkb->mac_len = pSkb->nh.raw - pSkb->data;
1565#endif
1566 }
1567
1568 /*
1569 * Switch on the ethertype.
1570 */
1571 uEtherType = pSkb->protocol;
1572 if ( uEtherType == RT_H2N_U16_C(RTNET_ETHERTYPE_VLAN)
1573 && pSkb->mac_len == sizeof(RTNETETHERHDR) + sizeof(uint32_t))
1574 {
1575 uint16_t const *puEtherType = skb_header_pointer(pSkb, sizeof(RTNETETHERHDR) + sizeof(uint16_t), sizeof(uint16_t), &Buf);
1576 if (puEtherType)
1577 uEtherType = *puEtherType;
1578 }
1579 switch (uEtherType)
1580 {
1581 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV4):
1582 {
1583 unsigned int cbHdr;
1584 PCRTNETIPV4 pIPv4 = (PCRTNETIPV4)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv4), &Buf);
1585 if (RT_UNLIKELY(!pIPv4))
1586 {
1587 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv4 hdr\n"));
1588 return false;
1589 }
1590
1591 cbHdr = pIPv4->ip_hl * 4;
1592 cbTransport = RT_N2H_U16(pIPv4->ip_len);
1593 if (RT_UNLIKELY( cbHdr < RTNETIPV4_MIN_LEN
1594 || cbHdr > cbTransport ))
1595 {
1596 Log5(("vboxNetFltLinuxCanForwardAsGso: invalid IPv4 lengths: ip_hl=%u ip_len=%u\n", pIPv4->ip_hl, RT_N2H_U16(pIPv4->ip_len)));
1597 return false;
1598 }
1599 cbTransport -= cbHdr;
1600 offTransport = pSkb->mac_len + cbHdr;
1601 uProtocol = pIPv4->ip_p;
1602 if (uProtocol == RTNETIPV4_PROT_TCP)
1603 enmGsoType = PDMNETWORKGSOTYPE_IPV4_TCP;
1604 else if (uProtocol == RTNETIPV4_PROT_UDP)
1605 enmGsoType = PDMNETWORKGSOTYPE_IPV4_UDP;
1606 else /** @todo IPv6: 4to6 tunneling */
1607 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1608 break;
1609 }
1610
1611 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV6):
1612 {
1613 PCRTNETIPV6 pIPv6 = (PCRTNETIPV6)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv6), &Buf);
1614 if (RT_UNLIKELY(!pIPv6))
1615 {
1616 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv6 hdr\n"));
1617 return false;
1618 }
1619
1620 cbTransport = RT_N2H_U16(pIPv6->ip6_plen);
1621 offTransport = pSkb->mac_len + sizeof(RTNETIPV6);
1622 uProtocol = pIPv6->ip6_nxt;
1623 /** @todo IPv6: Dig our way out of the other headers. */
1624 if (uProtocol == RTNETIPV4_PROT_TCP)
1625 enmGsoType = PDMNETWORKGSOTYPE_IPV6_TCP;
1626 else if (uProtocol == RTNETIPV4_PROT_UDP)
1627 enmGsoType = PDMNETWORKGSOTYPE_IPV6_UDP;
1628 else
1629 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1630 break;
1631 }
1632
1633 default:
1634 Log5(("vboxNetFltLinuxCanForwardAsGso: uEtherType=%#x\n", RT_H2N_U16(uEtherType)));
1635 return false;
1636 }
1637
1638 if (enmGsoType == PDMNETWORKGSOTYPE_INVALID)
1639 {
1640 Log5(("vboxNetFltLinuxCanForwardAsGso: Unsupported protocol %d\n", uProtocol));
1641 return false;
1642 }
1643
1644 if (RT_UNLIKELY( offTransport + cbTransport <= offTransport
1645 || offTransport + cbTransport > pSkb->len
1646 || cbTransport < (uProtocol == RTNETIPV4_PROT_TCP ? RTNETTCP_MIN_LEN : RTNETUDP_MIN_LEN)) )
1647 {
1648 Log5(("vboxNetFltLinuxCanForwardAsGso: Bad transport length; off=%#x + cb=%#x => %#x; skb_len=%#x (%s)\n",
1649 offTransport, cbTransport, offTransport + cbTransport, pSkb->len, PDMNetGsoTypeName(enmGsoType) ));
1650 return false;
1651 }
1652
1653 /*
1654 * Check the TCP/UDP bits.
1655 */
1656 if (uProtocol == RTNETIPV4_PROT_TCP)
1657 {
1658 PCRTNETTCP pTcp = (PCRTNETTCP)skb_header_pointer(pSkb, offTransport, sizeof(Buf.Tcp), &Buf);
1659 if (RT_UNLIKELY(!pTcp))
1660 {
1661 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access TCP hdr\n"));
1662 return false;
1663 }
1664
1665 cbTransportHdr = pTcp->th_off * 4;
1666 if (RT_UNLIKELY( cbTransportHdr < RTNETTCP_MIN_LEN
1667 || cbTransportHdr > cbTransport
1668 || offTransport + cbTransportHdr >= UINT8_MAX
1669 || offTransport + cbTransportHdr >= pSkb->len ))
1670 {
1671 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for TCP header; off=%#x cb=%#x skb_len=%#x\n", offTransport, cbTransportHdr, pSkb->len));
1672 return false;
1673 }
1674
1675 }
1676 else
1677 {
1678 Assert(uProtocol == RTNETIPV4_PROT_UDP);
1679 cbTransportHdr = sizeof(RTNETUDP);
1680 if (RT_UNLIKELY( offTransport + cbTransportHdr >= UINT8_MAX
1681 || offTransport + cbTransportHdr >= pSkb->len ))
1682 {
1683 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for UDP header; off=%#x skb_len=%#x\n", offTransport, pSkb->len));
1684 return false;
1685 }
1686 }
1687
1688 /*
1689 * We're good, init the GSO context.
1690 */
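    /* cbHdrs spans everything up to and including the transport header; offHdr1
       and offHdr2 are the offsets of the network and transport headers. */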
1691 pGsoCtx->u8Type = enmGsoType;
1692 pGsoCtx->cbHdrs = offTransport + cbTransportHdr;
1693 pGsoCtx->cbMaxSeg = skb_shinfo(pSkb)->gso_size;
1694 pGsoCtx->offHdr1 = pSkb->mac_len;
1695 pGsoCtx->offHdr2 = offTransport;
1696 pGsoCtx->au8Unused[0] = 0;
1697 pGsoCtx->au8Unused[1] = 0;
1698
1699 return true;
1700}
1701
1702/**
1703 * Forward the socket buffer as a GSO internal network frame.
1704 *
1705 * @returns IPRT status code.
1706 * @param pThis The net filter instance.
1707 * @param pSkb The GSO socket buffer.
1708 * @param fSrc The source.
1709 * @param pGsoCtx The GSO context to use for the frame.
1710 */
1711static int vboxNetFltLinuxForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1712{
1713 int rc;
1714 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pSkb);
1715 if (RT_LIKELY(cSegs <= MAX_SKB_FRAGS + 1))
1716 {
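        /* INTNETSG is variable sized; it is cheap to allocate it on the stack
           here since cSegs is bounded by MAX_SKB_FRAGS + 1. */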
1717 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1718 if (RT_LIKELY(pSG))
1719 {
1720 vboxNetFltLinuxSkBufToSG(pThis, pSkb, pSG, cSegs, fSrc, pGsoCtx);
1721
1722 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1723 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1724
1725 vboxNetFltLinuxDestroySG(pSG);
1726 rc = VINF_SUCCESS;
1727 }
1728 else
1729 {
1730 Log(("VBoxNetFlt: Dropping the sk_buff (failure case).\n"));
1731 rc = VERR_NO_MEMORY;
1732 }
1733 }
1734 else
1735 {
1736 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1737 rc = VERR_INTERNAL_ERROR_3;
1738 }
1739
1740 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1741 dev_kfree_skb(pSkb);
1742 return rc;
1743}
1744
1745#endif /* VBOXNETFLT_WITH_GSO_RECV */
1746
1747/**
1748 * Worker for vboxNetFltLinuxForwardToIntNet.
1749 *
1750 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_INTERNAL_ERROR_3.
1751 * @param pThis The net filter instance.
1752 * @param pBuf The socket buffer.
1753 * @param fSrc The source.
1754 */
1755static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
1756{
1757 int rc;
1758 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pBuf);
1759 if (cSegs <= MAX_SKB_FRAGS + 1)
1760 {
1761 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1762 if (RT_LIKELY(pSG))
1763 {
1764 if (fSrc & INTNETTRUNKDIR_WIRE)
1765 {
1766 /*
1767             * The packet came from the wire; the ethernet header was removed by the device driver.
1768 * Restore it.
1769 */
1770 skb_push(pBuf, ETH_HLEN);
1771 }
1772
1773 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc, NULL /*pGsoCtx*/);
1774
1775 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1776 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1777
1778 vboxNetFltLinuxDestroySG(pSG);
1779 rc = VINF_SUCCESS;
1780 }
1781 else
1782 {
1783 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
1784 rc = VERR_NO_MEMORY;
1785 }
1786 }
1787 else
1788 {
1789 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1790 rc = VERR_INTERNAL_ERROR_3;
1791 }
1792
1793 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1794 dev_kfree_skb(pBuf);
1795 return rc;
1796}
1797
1798/**
1799 * Forwards the socket buffer to the internal network, segmenting it first if necessary.
 *
 * @param pThis The net filter instance.
1800 * @param pBuf The socket buffer. This is consumed by this function.
1801 */
1802static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
1803{
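    /* PACKET_OUTGOING marks packets originating from the host stack;
       everything else came in from the wire. */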
1804 uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;
1805
1806#ifdef VBOXNETFLT_WITH_GSO
1807 if (skb_is_gso(pBuf))
1808 {
1809 PDMNETWORKGSO GsoCtx;
1810        Log3(("vboxNetFltLinuxForwardToIntNet: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_segs=%u gso_type=%x frag_list=%p pkt_type=%x ip_summed=%d\n",
1811 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, pBuf->ip_summed));
1812# ifdef VBOXNETFLT_WITH_GSO_RECV
1813 if ( (skb_shinfo(pBuf)->gso_type & (SKB_GSO_UDP | SKB_GSO_TCPV6 | SKB_GSO_TCPV4))
1814 && vboxNetFltLinuxCanForwardAsGso(pThis, pBuf, fSrc, &GsoCtx) )
1815 vboxNetFltLinuxForwardAsGso(pThis, pBuf, fSrc, &GsoCtx);
1816 else
1817# endif
1818 {
1819 /* Need to segment the packet */
1820 struct sk_buff *pNext;
1821 struct sk_buff *pSegment = skb_gso_segment(pBuf, 0 /*supported features*/);
1822 if (IS_ERR(pSegment))
1823 {
1824 dev_kfree_skb(pBuf);
1825 LogRel(("VBoxNetFlt: Failed to segment a packet (%d).\n", PTR_ERR(pSegment)));
1826 return;
1827 }
1828
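            /* Forward each segment separately, unlinking it from the list
               first since the forward call consumes it. */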
1829 for (; pSegment; pSegment = pNext)
1830 {
1831                Log3(("vboxNetFltLinuxForwardToIntNet: segment len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_segs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1832 pSegment->len, pSegment->data_len, pSegment->truesize, pSegment->next, skb_shinfo(pSegment)->nr_frags, skb_shinfo(pSegment)->gso_size, skb_shinfo(pSegment)->gso_segs, skb_shinfo(pSegment)->gso_type, skb_shinfo(pSegment)->frag_list, pSegment->pkt_type));
1833 pNext = pSegment->next;
1834 pSegment->next = 0;
1835 vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
1836 }
1837 dev_kfree_skb(pBuf);
1838 }
1839 }
1840 else
1841#endif /* VBOXNETFLT_WITH_GSO */
1842 {
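        /* For outbound packets the host stack may have deferred the checksum
           calculation to the NIC (CHECKSUM_PARTIAL); finish it in software
           before handing the packet to the internal network. */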
1843 if (pBuf->ip_summed == CHECKSUM_PARTIAL && pBuf->pkt_type == PACKET_OUTGOING)
1844 {
1845#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1846 /*
1847 * Try to work around the problem with CentOS 4.7 and 5.2 (2.6.9
1848 * and 2.6.18 kernels), they pass wrong 'h' pointer down. We take IP
1849 * header length from the header itself and reconstruct 'h' pointer
1850 * to TCP (or whatever) header.
1851 */
1852 unsigned char *tmp = pBuf->h.raw;
1853 if (pBuf->h.raw == pBuf->nh.raw && pBuf->protocol == htons(ETH_P_IP))
1854 pBuf->h.raw = pBuf->nh.raw + pBuf->nh.iph->ihl * 4;
1855#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1856 if (VBOX_SKB_CHECKSUM_HELP(pBuf))
1857 {
1858 LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
1859 dev_kfree_skb(pBuf);
1860 return;
1861 }
1862#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1863 /* Restore the original (wrong) pointer. */
1864 pBuf->h.raw = tmp;
1865#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1866 }
1867 vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
1868 }
1869}
1870
1871#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1872/**
1873 * Work queue handler that forwards the socket buffers queued by
1874 * vboxNetFltLinuxPacketHandler to the internal network.
1875 *
1876 * @param pWork The work queue.
1877 */
1878# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1879static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
1880# else
1881static void vboxNetFltLinuxXmitTask(void *pWork)
1882# endif
1883{
1884 PVBOXNETFLTINS pThis = VBOX_FLT_XT_TO_INST(pWork);
1885 struct sk_buff *pBuf;
1886
1887 Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
1888
1889 /*
1890 * Active? Retain the instance and increment the busy counter.
1891 */
1892 if (vboxNetFltTryRetainBusyActive(pThis))
1893 {
1894 while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != NULL)
1895 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1896
1897 vboxNetFltRelease(pThis, true /* fBusy */);
1898 }
1899 else
1900 {
1901 /** @todo Shouldn't we just drop the packets here? There is little point in
1902 * making them accumulate when the VM is paused and it'll only waste
1903 * kernel memory anyway... Hmm. maybe wait a short while (2-5 secs)
1904 * before start draining the packets (goes for the intnet ring buf
1905 * too)? */
1906 }
1907}
1908#endif /* !VBOXNETFLT_LINUX_NO_XMIT_QUEUE */
1909
1910/**
1911 * Reports the GSO capabilities of the hardware NIC.
1912 *
1913 * @param pThis The net filter instance. The caller holds a
1914 * reference to this.
1915 */
1916static void vboxNetFltLinuxReportNicGsoCapabilities(PVBOXNETFLTINS pThis)
1917{
1918#ifdef VBOXNETFLT_WITH_GSO_XMIT_WIRE
1919 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
1920 {
1921 struct net_device *pDev;
1922 PINTNETTRUNKSWPORT pSwitchPort;
1923 unsigned int fFeatures;
1924 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1925
1926 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1927
1928        pSwitchPort = pThis->pSwitchPort; /* this doesn't need to be here, but it does no harm. */
1929 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1930 if (pDev)
1931 fFeatures = pDev->features;
1932 else
1933 fFeatures = 0;
1934
1935 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1936
1937 if (pThis->pSwitchPort)
1938 {
1939 /* Set/update the GSO capabilities of the NIC. */
1940            uint32_t fGsoCapabilities = 0;
1941            if (fFeatures & NETIF_F_TSO)
1942                fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP);
1943            if (fFeatures & NETIF_F_TSO6)
1944                fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP);
1945# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
1946            if (fFeatures & NETIF_F_UFO)
1947                fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP);
1948            if (fFeatures & NETIF_F_UFO)
1949                fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP);
1950# endif
1951            pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort, fGsoCapabilities, INTNETTRUNKDIR_WIRE);
1952 }
1953
1954 vboxNetFltRelease(pThis, true /*fBusy*/);
1955 }
1956#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE */
1957}
1958
1959/**
1960 * Helper that determines whether the host (ignoring us) is operating the
1961 * interface in promiscuous mode or not.
1962 */
1963static bool vboxNetFltLinuxPromiscuous(PVBOXNETFLTINS pThis)
1964{
1965 bool fRc = false;
1966 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
1967 if (pDev)
1968 {
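        /* Subtract our own promiscuity reference (if set) so we report only
           what the rest of the host has requested. */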
1969 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
1970 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
1971 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
1972 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1973 }
1974 return fRc;
1975}
1976
1977#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1978/**
1979 * Helper for detecting TAP devices.
1980 */
1981static bool vboxNetFltIsTapDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
1982{
1983 if (pDev->ethtool_ops && pDev->ethtool_ops->get_drvinfo)
1984 {
1985 struct ethtool_drvinfo Info;
1986
1987 memset(&Info, 0, sizeof(Info));
1988 Info.cmd = ETHTOOL_GDRVINFO;
1989 pDev->ethtool_ops->get_drvinfo(pDev, &Info);
1990 Log3(("vboxNetFltIsTapDevice: driver=%s version=%s bus_info=%s\n",
1991 Info.driver, Info.version, Info.bus_info));
1992
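        /* The "tun" driver reports bus_info "tap" when running in TAP mode;
           strncmp over 4 bytes also compares the terminating NUL, making this
           an exact match. */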
1993 return !strncmp(Info.driver, "tun", 4)
1994 && !strncmp(Info.bus_info, "tap", 4);
1995 }
1996
1997 return false;
1998}
1999
2000/**
2001 * Helper for updating the link state of TAP devices.
2002 * Only TAP devices are affected.
2003 */
2004static void vboxNetFltSetTapLinkState(PVBOXNETFLTINS pThis, struct net_device *pDev, bool fLinkUp)
2005{
2006 if (vboxNetFltIsTapDevice(pThis, pDev))
2007 {
2008        Log3(("vboxNetFltSetTapLinkState: bringing tap device link state %s\n",
2009              fLinkUp ? "up" : "down"));
2010 netif_tx_lock_bh(pDev);
2011 if (fLinkUp)
2012 netif_carrier_on(pDev);
2013 else
2014 netif_carrier_off(pDev);
2015 netif_tx_unlock_bh(pDev);
2016 }
2017}
2018#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
2019DECLINLINE(void) vboxNetFltSetTapLinkState(PVBOXNETFLTINS pThis, struct net_device *pDev, bool fLinkUp)
2020{
2021 /* Nothing to do for pre-2.6.36 kernels. */
2022}
2023#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
2024
2025/**
2026 * Internal worker for vboxNetFltLinuxNotifierCallback.
2027 *
2028 * @returns VBox status code.
2029 * @param pThis The instance.
2030 * @param fRediscovery If set we're doing a rediscovery attempt, so, don't
2031 * @param pDev  The net_device to attach to.
2033static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
2034{
2035 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2036 LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));
2037
2038 /*
2039 * Retain and store the device.
2040 */
2041 dev_hold(pDev);
2042
2043 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2044 ASMAtomicUoWritePtr(&pThis->u.s.pDev, pDev);
2045 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2046
2047 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n",
2048 pDev, pDev->name,
2049#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2050 netdev_refcnt_read(pDev)
2051#else
2052 atomic_read(&pDev->refcnt)
2053#endif
2054 ));
2055 Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2056 pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
2057
2058 /* Get the mac address while we still have a valid net_device reference. */
2059 memcpy(&pThis->u.s.MacAddr, pDev->dev_addr, sizeof(pThis->u.s.MacAddr));
2060
2061 /*
2062 * Install a packet filter for this device with a protocol wildcard (ETH_P_ALL).
2063 */
2064 pThis->u.s.PacketType.type = __constant_htons(ETH_P_ALL);
2065 pThis->u.s.PacketType.dev = pDev;
2066 pThis->u.s.PacketType.func = vboxNetFltLinuxPacketHandler;
2067 dev_add_pack(&pThis->u.s.PacketType);
2068
2069#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2070 vboxNetFltLinuxHookDev(pThis, pDev);
2071#endif
2072#ifdef VBOXNETFLT_WITH_QDISC
2073 vboxNetFltLinuxQdiscInstall(pThis, pDev);
2074#endif /* VBOXNETFLT_WITH_QDISC */
2075
2076 /*
2077 * If attaching to TAP interface we need to bring the link state up
2078     * If attaching to a TAP interface we need to bring the link state up
2079     * ourselves, starting with the 2.6.36 kernel.
2080 vboxNetFltSetTapLinkState(pThis, pDev, true);
2081
2082 /*
2083     * Set indicators that require the spinlock. Be a bit paranoid about racing
2084     * the device notification handler.
2085 */
2086 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2087 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2088 if (pDev)
2089 {
2090 ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
2091 ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
2092 pDev = NULL; /* don't dereference it */
2093 }
2094 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2095 Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));
2096
2097 /*
2098 * If the above succeeded report GSO capabilities, if not undo and
2099 * release the device.
2100 */
2101 if (!pDev)
2102 {
2103 Assert(pThis->pSwitchPort);
2104 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
2105 {
2106 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2107 pThis->pSwitchPort->pfnReportMacAddress(pThis->pSwitchPort, &pThis->u.s.MacAddr);
2108 pThis->pSwitchPort->pfnReportPromiscuousMode(pThis->pSwitchPort, vboxNetFltLinuxPromiscuous(pThis));
2109 pThis->pSwitchPort->pfnReportNoPreemptDsts(pThis->pSwitchPort, INTNETTRUNKDIR_WIRE | INTNETTRUNKDIR_HOST);
2110 vboxNetFltRelease(pThis, true /*fBusy*/);
2111 }
2112 }
2113 else
2114 {
2115#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2116 vboxNetFltLinuxUnhookDev(pThis, pDev);
2117#endif
2118#ifdef VBOXNETFLT_WITH_QDISC
2119 vboxNetFltLinuxQdiscRemove(pThis, pDev);
2120#endif /* VBOXNETFLT_WITH_QDISC */
2121 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2122 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2123 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2124 dev_put(pDev);
2125 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n",
2126 pDev, pDev->name,
2127#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2128 netdev_refcnt_read(pDev)
2129#else
2130 atomic_read(&pDev->refcnt)
2131#endif
2132 ));
2133 }
2134
2135 LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.MacAddr), &pThis->u.s.MacAddr));
2136 return VINF_SUCCESS;
2137}
2138
2139
2140static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
2141{
2142 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2143
2144 Assert(!pThis->fDisconnectedFromHost);
2145
2146#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2147 vboxNetFltLinuxUnhookDev(pThis, pDev);
2148#endif
2149#ifdef VBOXNETFLT_WITH_QDISC
2150 vboxNetFltLinuxQdiscRemove(pThis, pDev);
2151#endif /* VBOXNETFLT_WITH_QDISC */
2152
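    /* Mark the instance disconnected under the spinlock so the packet handler
       cannot race us while we drop the device reference. */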
2153 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2154 ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
2155 ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
2156 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2157 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2158
2159 dev_remove_pack(&pThis->u.s.PacketType);
2160#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2161 skb_queue_purge(&pThis->u.s.XmitQueue);
2162#endif
2163 Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2164 Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n",
2165 pDev, pDev->name,
2166#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2167 netdev_refcnt_read(pDev)
2168#else
2169 atomic_read(&pDev->refcnt)
2170#endif
2171 ));
2172 dev_put(pDev);
2173
2174 return NOTIFY_OK;
2175}
2176
2177static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
2178{
2179 /* Check if we are not suspended and promiscuous mode has not been set. */
2180 if ( pThis->enmTrunkState == INTNETTRUNKIFSTATE_ACTIVE
2181 && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2182 {
2183        /* Note that there is no need for locking as the kernel already holds the lock. */
2184 dev_set_promiscuity(pDev, 1);
2185 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
2186 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2187 }
2188 else
2189 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2190 return NOTIFY_OK;
2191}
2192
2193static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
2194{
2195    /* Undo promiscuous mode if we have set it. */
2196 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2197 {
2198        /* Note that there is no need for locking as the kernel already holds the lock. */
2199 dev_set_promiscuity(pDev, -1);
2200 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
2201 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2202 }
2203 else
2204 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2205 return NOTIFY_OK;
2206}
2207
2208#ifdef LOG_ENABLED
2209/** Stringify the NETDEV_XXX constants. */
2210static const char *vboxNetFltLinuxGetNetDevEventName(unsigned long ulEventType)
2211{
2212    const char *pszEvent = "NETDEV_<unknown>";
2213 switch (ulEventType)
2214 {
2215 case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
2216 case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
2217 case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
2218 case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
2219 case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
2220 case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
2221 case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
2222 case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
2223 case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
2224 case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
2225# ifdef NETDEV_FEAT_CHANGE
2226 case NETDEV_FEAT_CHANGE: pszEvent = "NETDEV_FEAT_CHANGE"; break;
2227# endif
2228 }
2229 return pszEvent;
2230}
2231#endif /* LOG_ENABLED */
2232
2233/**
2234 * Callback for listening to netdevice events.
2235 *
2236 * This works the rediscovery, clean up on unregistration, promiscuity on
2237 * up/down, and GSO feature changes from ethtool.
2238 *
2239 * @returns NOTIFY_OK
2240 * @param self Pointer to our notifier registration block.
2241 * @param ulEventType The event.
2242 * @param ptr Event specific, but it is usually the device it
2243 * relates to.
2244 */
2245static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
2246
2247{
2248 PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);
2249 struct net_device *pDev = (struct net_device *)ptr;
2250 int rc = NOTIFY_OK;
2251
2252 Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2253 vboxNetFltLinuxGetNetDevEventName(ulEventType), ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
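    /* NETDEV_REGISTER for a device matching our configured name means our
       interface has (re)appeared: attach to it. All other events only matter
       for the device we are currently attached to. */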
2254 if ( ulEventType == NETDEV_REGISTER
2255 && !strcmp(pDev->name, pThis->szName))
2256 {
2257 vboxNetFltLinuxAttachToInterface(pThis, pDev);
2258 }
2259 else
2260 {
2261 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2262 if (pDev == ptr)
2263 {
2264 switch (ulEventType)
2265 {
2266 case NETDEV_UNREGISTER:
2267 rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
2268 break;
2269 case NETDEV_UP:
2270 rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
2271 break;
2272 case NETDEV_GOING_DOWN:
2273 rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
2274 break;
2275 case NETDEV_CHANGENAME:
2276 break;
2277#ifdef NETDEV_FEAT_CHANGE
2278 case NETDEV_FEAT_CHANGE:
2279 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2280 break;
2281#endif
2282 }
2283 }
2284 }
2285
2286 return rc;
2287}
2288
2289bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
2290{
2291 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
2292}
2293
2294int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, void *pvIfData, PINTNETSG pSG, uint32_t fDst)
2295{
2296 struct net_device * pDev;
2297 int err;
2298 int rc = VINF_SUCCESS;
2299 NOREF(pvIfData);
2300
2301 LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));
2302
2303 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2304 if (pDev)
2305 {
2306 /*
2307 * Create a sk_buff for the gather list and push it onto the wire.
2308 */
2309 if (fDst & INTNETTRUNKDIR_WIRE)
2310 {
2311 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
2312 if (pBuf)
2313 {
2314 vboxNetFltDumpPacket(pSG, true, "wire", 1);
2315 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2316 Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
2317 err = dev_queue_xmit(pBuf);
2318 if (err)
2319 rc = RTErrConvertFromErrno(err);
2320 }
2321 else
2322 rc = VERR_NO_MEMORY;
2323 }
2324
2325 /*
2326 * Create a sk_buff for the gather list and push it onto the host stack.
2327 */
2328 if (fDst & INTNETTRUNKDIR_HOST)
2329 {
2330 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
2331 if (pBuf)
2332 {
2333 vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
2334 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2335 Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
2336 err = netif_rx_ni(pBuf);
2337 if (err)
2338 rc = RTErrConvertFromErrno(err);
2339 }
2340 else
2341 rc = VERR_NO_MEMORY;
2342 }
2343
2344 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2345 }
2346
2347 return rc;
2348}
2349
2350
2351void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
2352{
2353 struct net_device * pDev;
2354
2355 LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
2356 pThis, pThis->szName, fActive?"true":"false",
2357 pThis->fDisablePromiscuous?"true":"false"));
2358
2359 if (pThis->fDisablePromiscuous)
2360 return;
2361
2362 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2363 if (pDev)
2364 {
2365 /*
2366         * This API is a bit weird; the best reference is the code.
2367         *
2368         * Also, we have a bit of a race condition wrt the maintenance of
2369         * the host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
2370 */
2371#ifdef LOG_ENABLED
2372 u_int16_t fIf;
2373 unsigned const cPromiscBefore = pDev->promiscuity;
2374#endif
2375 if (fActive)
2376 {
2377 Assert(!pThis->u.s.fPromiscuousSet);
2378
2379 rtnl_lock();
2380 dev_set_promiscuity(pDev, 1);
2381 rtnl_unlock();
2382 pThis->u.s.fPromiscuousSet = true;
2383 Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2384 }
2385 else
2386 {
2387 if (pThis->u.s.fPromiscuousSet)
2388 {
2389 rtnl_lock();
2390 dev_set_promiscuity(pDev, -1);
2391 rtnl_unlock();
2392 Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2393 }
2394 pThis->u.s.fPromiscuousSet = false;
2395
2396#ifdef LOG_ENABLED
2397 fIf = dev_get_flags(pDev);
2398 Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, pDev->promiscuity));
2399#endif
2400 }
2401
2402 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2403 }
2404}
2405
2406
2407int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
2408{
2409#ifdef VBOXNETFLT_WITH_QDISC
2410 vboxNetFltLinuxQdiscRemove(pThis, NULL);
2411#endif /* VBOXNETFLT_WITH_QDISC */
2412 /*
2413     * Remove the packet handler when we get disconnected from the internal switch,
2414     * as we don't want the handler to forward packets to a disconnected switch.
2415 */
2416 dev_remove_pack(&pThis->u.s.PacketType);
2417 return VINF_SUCCESS;
2418}
2419
2420
2421int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
2422{
2423 /*
2424 * Report the GSO capabilities of the host and device (if connected).
2425 * Note! No need to mark ourselves busy here.
2426 */
2427 /** @todo duplicate work here now? Attach */
2428#if defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
2429 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort,
2430 0
2431 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP)
2432 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP)
2433# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
2434 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP)
2435 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP)
2436# endif
2437 , INTNETTRUNKDIR_HOST);
2438
2439#endif
2440 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2441
2442 return VINF_SUCCESS;
2443}
2444
2445
2446void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
2447{
2448 struct net_device *pDev;
2449 bool fRegistered;
2450 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2451
2452#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2453 vboxNetFltLinuxUnhookDev(pThis, NULL);
2454#endif
2455
2456 /** @todo This code may race vboxNetFltLinuxUnregisterDevice (very very
2457 * unlikely, but none the less). Since it doesn't actually update the
2458 * state (just reads it), it is likely to panic in some interesting
2459 * ways. */
2460
2461 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2462 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2463 fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
2464 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2465
2466 if (fRegistered)
2467 {
2468 vboxNetFltSetTapLinkState(pThis, pDev, false);
2469
2470#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2471 skb_queue_purge(&pThis->u.s.XmitQueue);
2472#endif
2473 Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2474 Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n",
2475 pDev, pDev->name,
2476#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2477 netdev_refcnt_read(pDev)
2478#else
2479 atomic_read(&pDev->refcnt)
2480#endif
2481 ));
2482 dev_put(pDev);
2483 }
2484 Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
2485 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2486 module_put(THIS_MODULE);
2487}
2488
2489
2490int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
2491{
2492 int err;
2493 NOREF(pvContext);
2494
2495 pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
2496 err = register_netdevice_notifier(&pThis->u.s.Notifier);
2497 if (err)
2498 return VERR_INTNET_FLT_IF_FAILED;
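    /* register_netdevice_notifier() replays NETDEV_REGISTER for devices that
       already exist, so if our interface is present the callback has attached
       and set fRegistered by now. */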
2499 if (!pThis->u.s.fRegistered)
2500 {
2501 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2502 LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
2503 return VERR_INTNET_FLT_IF_NOT_FOUND;
2504 }
2505
2506 Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
2507 if ( pThis->fDisconnectedFromHost
2508 || !try_module_get(THIS_MODULE))
2509 return VERR_INTNET_FLT_IF_FAILED;
2510
2511 return VINF_SUCCESS;
2512}
2513
2514int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
2515{
2516 /*
2517 * Init the linux specific members.
2518 */
2519 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2520 pThis->u.s.fRegistered = false;
2521 pThis->u.s.fPromiscuousSet = false;
2522 memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
2523#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2524 skb_queue_head_init(&pThis->u.s.XmitQueue);
2525# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
2526 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
2527# else
2528 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
2529# endif
2530#endif
2531
2532 return VINF_SUCCESS;
2533}
2534
2535
2536void vboxNetFltPortOsNotifyMacAddress(PVBOXNETFLTINS pThis, void *pvIfData, PCRTMAC pMac)
2537{
2538 NOREF(pThis); NOREF(pvIfData); NOREF(pMac);
2539}
2540
2541
2542int vboxNetFltPortOsConnectInterface(PVBOXNETFLTINS pThis, void *pvIf, void **pvIfData)
2543{
2544 /* Nothing to do */
2545 NOREF(pThis); NOREF(pvIf); NOREF(pvIfData);
2546 return VINF_SUCCESS;
2547}
2548
2549
2550int vboxNetFltPortOsDisconnectInterface(PVBOXNETFLTINS pThis, void *pvIfData)
2551{
2552 /* Nothing to do */
2553 NOREF(pThis); NOREF(pvIfData);
2554 return VINF_SUCCESS;
2555}
2556