VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@ 31678

Last change on this file since 31678 was 31678, checked in by vboxsync, 14 years ago

vboxnetflt: pre 2.6.10 kernel compilation fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 82.4 KB
1/* $Id: VBoxNetFlt-linux.c 31678 2010-08-13 18:45:20Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
22#define VBOXNETFLT_LINUX_NO_XMIT_QUEUE
23#include "the-linux-kernel.h"
24#include "version-generated.h"
25#include "product-generated.h"
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/rtnetlink.h>
29#include <linux/miscdevice.h>
30#include <linux/ip.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <VBox/intnetinline.h>
35#include <VBox/pdmnetinline.h>
36#include <VBox/param.h>
37#include <iprt/alloca.h>
38#include <iprt/assert.h>
39#include <iprt/spinlock.h>
40#include <iprt/semaphore.h>
41#include <iprt/initterm.h>
42#include <iprt/process.h>
43#include <iprt/mem.h>
44#include <iprt/net.h>
45#include <iprt/log.h>
46#include <iprt/mp.h>
47#include <iprt/mem.h>
48#include <iprt/time.h>
49
50#define VBOXNETFLT_OS_SPECFIC 1
51#include "../VBoxNetFltInternal.h"
52
53/*
54 * Comment out the following line to disable qdisc support.
55 */
56#define VBOXNETFLT_WITH_QDISC
57#ifdef VBOXNETFLT_WITH_QDISC
58#include <net/pkt_sched.h>
59#endif /* VBOXNETFLT_WITH_QDISC */
60
61
62/*******************************************************************************
63* Defined Constants And Macros *
64*******************************************************************************/
65#define VBOX_FLT_NB_TO_INST(pNB) RT_FROM_MEMBER(pNB, VBOXNETFLTINS, u.s.Notifier)
66#define VBOX_FLT_PT_TO_INST(pPT) RT_FROM_MEMBER(pPT, VBOXNETFLTINS, u.s.PacketType)
67#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
68# define VBOX_FLT_XT_TO_INST(pXT) RT_FROM_MEMBER(pXT, VBOXNETFLTINS, u.s.XmitTask)
69#endif
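
/*
 * Illustrative sketch (for exposition, not part of the original file): the
 * *_TO_INST macros above are container-of style conversions.  Given a pointer
 * to a member embedded in VBOXNETFLTINS, RT_FROM_MEMBER subtracts the member's
 * offset to recover the owning instance, roughly:
 *
 *     #define MY_FROM_MEMBER(pMember, Type, Member) \
 *         ( (Type *)((uintptr_t)(pMember) - RT_OFFSETOF(Type, Member)) )
 *
 * which is how a callback that only receives &pThis->u.s.PacketType can get
 * at pThis:
 *
 *     PVBOXNETFLTINS pThis = VBOX_FLT_PT_TO_INST(pPacketType);
 */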
70
71#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
72# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
73# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
74#else
75# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
76# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
77#endif
78
79#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
80# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
81#else
82# define CHECKSUM_PARTIAL CHECKSUM_HW
83# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
84# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
85# else
86# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
87# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
88# else
89# define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
90# endif
91/* Versions prior to 2.6.10 use stats for both bstats and qstats */
92# define bstats stats
93# define qstats stats
94# endif
95#endif
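
/*
 * Illustrative usage (a sketch, not original code): the version ladder above
 * lets the rest of the file call the checksum helper uniformly.  A caller that
 * must materialize a partial checksum before handing the buffer on can do:
 *
 *     if (pBuf->ip_summed == CHECKSUM_PARTIAL && VBOX_SKB_CHECKSUM_HELP(pBuf))
 *         dev_kfree_skb(pBuf);   // checksum could not be completed, drop it
 *
 * which is the pattern used by vboxNetFltLinuxForwardToIntNet further down.
 */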
96
97#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)
98static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
99{
100 kfree_skb(skb);
101 sch->stats.drops++;
102
103 return NET_XMIT_DROP;
104}
105#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) */
106
107#ifndef NET_IP_ALIGN
108# define NET_IP_ALIGN 2
109#endif
110
111#if 0
112/** Create scatter / gather segments for fragments. When not used, we will
113 * linearize the socket buffer before creating the internal networking SG. */
114# define VBOXNETFLT_SG_SUPPORT 1
115#endif
116
117#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
118/** Indicates that the Linux kernel may send us GSO frames. */
119# define VBOXNETFLT_WITH_GSO 1
120
121/** This enables or disables the transmitting of GSO frames from the internal
122 * network to the host. */
123# define VBOXNETFLT_WITH_GSO_XMIT_HOST 1
124
125# if 0 /** @todo This is currently disabled because it causes a performance loss of 5-10%. */
126/** This enables or disables the transmitting of GSO frames from the internal
127 * network to the wire. */
128# define VBOXNETFLT_WITH_GSO_XMIT_WIRE 1
129# endif
130
131/** This enables or disables the forwarding/flooding of GSO frames from the host
132 * to the internal network. */
133# define VBOXNETFLT_WITH_GSO_RECV 1
134
135#endif
136
137#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
138/** This enables or disables handling of GSO frames coming from the wire (GRO). */
139# define VBOXNETFLT_WITH_GRO 1
140#endif
141/*
142 * GRO support was backported to RHEL 5.4
143 */
144#ifdef RHEL_RELEASE_CODE
145# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)
146# define VBOXNETFLT_WITH_GRO 1
147# endif
148#endif
149
150/*******************************************************************************
151* Internal Functions *
152*******************************************************************************/
153static int VBoxNetFltLinuxInit(void);
154static void VBoxNetFltLinuxUnload(void);
155static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf);
156
157
158/*******************************************************************************
159* Global Variables *
160*******************************************************************************/
161/**
162 * The (common) global data.
163 */
164static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
165
166module_init(VBoxNetFltLinuxInit);
167module_exit(VBoxNetFltLinuxUnload);
168
169MODULE_AUTHOR(VBOX_VENDOR);
170MODULE_DESCRIPTION(VBOX_PRODUCT " Network Filter Driver");
171MODULE_LICENSE("GPL");
172#ifdef MODULE_VERSION
173MODULE_VERSION(VBOX_VERSION_STRING " (" RT_XSTR(INTNETTRUNKIFPORT_VERSION) ")");
174#endif
175
176
177#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) && defined(LOG_ENABLED)
178unsigned dev_get_flags(const struct net_device *dev)
179{
180 unsigned flags;
181
182 flags = (dev->flags & ~(IFF_PROMISC |
183 IFF_ALLMULTI |
184 IFF_RUNNING)) |
185 (dev->gflags & (IFF_PROMISC |
186 IFF_ALLMULTI));
187
188 if (netif_running(dev) && netif_carrier_ok(dev))
189 flags |= IFF_RUNNING;
190
191 return flags;
192}
193#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
194
195
196#ifdef VBOXNETFLT_WITH_QDISC
197//#define QDISC_LOG(x) printk x
198#define QDISC_LOG(x) do { } while (0)
199
200#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
201#define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops)
202#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
203#define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops, parent)
204#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
205#define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, queue, ops, parent)
206#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
207
208#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
209#define qdisc_dev(qdisc) (qdisc->dev)
210#define qdisc_pkt_len(skb) (skb->len)
211#define QDISC_GET(dev) (dev->qdisc_sleeping)
212#else
213#define QDISC_GET(dev) (netdev_get_tx_queue(dev, 0)->qdisc_sleeping)
214#endif
215
216#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
217#define QDISC_SAVED_NUM(dev) 1
218#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
219#define QDISC_SAVED_NUM(dev) dev->num_tx_queues
220#else
221#define QDISC_SAVED_NUM(dev) dev->num_tx_queues+1
222#endif
223
224#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
225#define QDISC_IS_BUSY(dev, qdisc) test_bit(__LINK_STATE_SCHED, &dev->state)
226#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
227#define QDISC_IS_BUSY(dev, qdisc) (test_bit(__QDISC_STATE_RUNNING, &qdisc->state) || \
228 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
229#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
230
231struct VBoxNetQDiscPriv
232{
233 /** Pointer to the single child qdisc. */
234 struct Qdisc *pChild;
235 /*
236 * Technically it is possible to have different qdiscs for different TX
237 * queues so we have to save them all.
238 */
239 /** Pointer to the array of saved qdiscs. */
240 struct Qdisc **ppSaved;
241 /** Pointer to the net filter instance. */
242 PVBOXNETFLTINS pVBoxNetFlt;
243};
244typedef struct VBoxNetQDiscPriv *PVBOXNETQDISCPRIV;
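
/*
 * Illustrative sketch (an assumption, not quoted from the kernel headers):
 * the private area declared above lives directly behind struct Qdisc in the
 * allocation the kernel makes for us (see priv_size in g_VBoxNetFltQDiscOps
 * below), so on the kernels targeted here qdisc_priv() amounts to:
 *
 *     static inline void *my_qdisc_priv(struct Qdisc *sch)
 *     {
 *         return (char *)sch + QDISC_ALIGN(sizeof(struct Qdisc));
 *     }
 */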
245
246//#define VBOXNETFLT_QDISC_ENQUEUE
247static int vboxNetFltQdiscEnqueue(struct sk_buff *skb, struct Qdisc *sch)
248{
249 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
250 int rc;
251
252#ifdef VBOXNETFLT_QDISC_ENQUEUE
253 if (VALID_PTR(pPriv->pVBoxNetFlt))
254 {
255 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
256 PCRTNETETHERHDR pEtherHdr;
257 PINTNETTRUNKSWPORT pSwitchPort;
258 uint32_t cbHdrs = skb_headlen(skb);
259
260 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
261 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(skb, 0, cbHdrs, &abHdrBuf[0]);
262 if ( pEtherHdr
263 && (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) != NULL
264 && VALID_PTR(pSwitchPort)
265 && cbHdrs >= 6)
266 {
267 /** @todo consider reference counting, etc. */
268 INTNETSWDECISION enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
269 if (enmDecision == INTNETSWDECISION_INTNET)
270 {
271 struct sk_buff *pBuf = skb_copy(skb, GFP_ATOMIC);
272 pBuf->pkt_type = PACKET_OUTGOING;
273 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
274 qdisc_drop(skb, sch);
275 ++sch->bstats.packets;
276 sch->bstats.bytes += qdisc_pkt_len(skb);
277 return NET_XMIT_SUCCESS;
278 }
279 }
280 }
281#endif /* VBOXNETFLT_QDISC_ENQUEUE */
282 rc = pPriv->pChild->enqueue(skb, pPriv->pChild);
283 if (rc == NET_XMIT_SUCCESS)
284 {
285 ++sch->q.qlen;
286 ++sch->bstats.packets;
287 sch->bstats.bytes += qdisc_pkt_len(skb);
288 }
289 else
290 ++sch->qstats.drops;
291 return rc;
292}
293
294static struct sk_buff *vboxNetFltQdiscDequeue(struct Qdisc *sch)
295{
296 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
297#ifdef VBOXNETFLT_QDISC_ENQUEUE
298 --sch->q.qlen;
299 return pPriv->pChild->dequeue(pPriv->pChild);
300#else /* VBOXNETFLT_QDISC_ENQUEUE */
301 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
302 PCRTNETETHERHDR pEtherHdr;
303 PINTNETTRUNKSWPORT pSwitchPort;
304 struct sk_buff *pSkb;
305
306 QDISC_LOG(("vboxNetFltDequeue: Enter pThis=%p\n", pPriv->pVBoxNetFlt));
307
308 while ((pSkb = pPriv->pChild->dequeue(pPriv->pChild)) != NULL)
309 {
310 struct sk_buff *pBuf;
311 INTNETSWDECISION enmDecision;
312 uint32_t cbHdrs;
313
314 --sch->q.qlen;
315
316 if (!VALID_PTR(pPriv->pVBoxNetFlt))
317 break;
318
319 cbHdrs = skb_headlen(pSkb);
320 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
321 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
322 if ( !pEtherHdr
323 || (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) == NULL
324 || !VALID_PTR(pSwitchPort)
325 || cbHdrs < 6)
326 break;
327
328 /** @todo consider reference counting, etc. */
329 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
330 if (enmDecision != INTNETSWDECISION_INTNET)
331 break;
332
333 pBuf = skb_copy(pSkb, GFP_ATOMIC);
334 pBuf->pkt_type = PACKET_OUTGOING;
335 QDISC_LOG(("vboxNetFltDequeue: pThis=%p\n", pPriv->pVBoxNetFlt));
336 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
337 qdisc_drop(pSkb, sch);
338 QDISC_LOG(("VBoxNetFlt: Packet for %02x:%02x:%02x:%02x:%02x:%02x dropped\n",
339 pSkb->data[0], pSkb->data[1], pSkb->data[2],
340 pSkb->data[3], pSkb->data[4], pSkb->data[5]));
341 }
342
343 return pSkb;
344#endif /* VBOXNETFLT_QDISC_ENQUEUE */
345}
346
347#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
348static int vboxNetFltQdiscRequeue(struct sk_buff *skb, struct Qdisc *sch)
349{
350 int rc;
351 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
352
353 rc = pPriv->pChild->ops->requeue(skb, pPriv->pChild);
354 if (rc == 0)
355 {
356 sch->q.qlen++;
357#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
358 sch->qstats.requeues++;
359#endif
360 }
361
362 return rc;
363}
364#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
365
366static unsigned int vboxNetFltQdiscDrop(struct Qdisc *sch)
367{
368 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
369 unsigned int cbLen;
370
371 if (pPriv->pChild->ops->drop)
372 {
373 cbLen = pPriv->pChild->ops->drop(pPriv->pChild);
374 if (cbLen != 0)
375 {
376 ++sch->qstats.drops;
377 --sch->q.qlen;
378 return cbLen;
379 }
380 }
381
382 return 0;
383}
384
385#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
386static int vboxNetFltQdiscInit(struct Qdisc *sch, struct rtattr *opt)
387#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
388static int vboxNetFltQdiscInit(struct Qdisc *sch, struct nlattr *opt)
389#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
390{
391 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
392 struct net_device *pDev = qdisc_dev(sch);
393
394 pPriv->pVBoxNetFlt = NULL;
395
396 pPriv->ppSaved = kcalloc(QDISC_SAVED_NUM(pDev), sizeof(pPriv->ppSaved[0]),
397 GFP_KERNEL);
398 if (!pPriv->ppSaved)
399 return -ENOMEM;
400
401 pPriv->pChild = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
402 &pfifo_qdisc_ops,
403 TC_H_MAKE(TC_H_MAJ(sch->handle),
404 TC_H_MIN(1)));
405 if (!pPriv->pChild)
406 {
407 kfree(pPriv->ppSaved);
408 pPriv->ppSaved = NULL;
409 return -ENOMEM;
410 }
411
412 return 0;
413}
414
415static void vboxNetFltQdiscReset(struct Qdisc *sch)
416{
417 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
418
419 qdisc_reset(pPriv->pChild);
420 sch->q.qlen = 0;
421 sch->qstats.backlog = 0;
422}
423
424static void vboxNetFltQdiscDestroy(struct Qdisc* sch)
425{
426 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
427 struct net_device *pDev = qdisc_dev(sch);
428
429 qdisc_destroy(pPriv->pChild);
430 pPriv->pChild = NULL;
431
432 if (pPriv->ppSaved)
433 {
434 int i;
435 for (i = 0; i < QDISC_SAVED_NUM(pDev); i++)
436 if (pPriv->ppSaved[i])
437 qdisc_destroy(pPriv->ppSaved[i]);
438 kfree(pPriv->ppSaved);
439 pPriv->ppSaved = NULL;
440 }
441}
442
443static int vboxNetFltClassGraft(struct Qdisc *sch, unsigned long arg, struct Qdisc *pNew,
444 struct Qdisc **ppOld)
445{
446 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
447
448 if (pNew == NULL)
449 pNew = &noop_qdisc;
450
451 sch_tree_lock(sch);
452 *ppOld = pPriv->pChild;
453 pPriv->pChild = pNew;
454#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
455 sch->q.qlen = 0;
456#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
457 qdisc_tree_decrease_qlen(*ppOld, (*ppOld)->q.qlen);
458#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
459 qdisc_reset(*ppOld);
460 sch_tree_unlock(sch);
461
462 return 0;
463}
464
465static struct Qdisc *vboxNetFltClassLeaf(struct Qdisc *sch, unsigned long arg)
466{
467 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
468 return pPriv->pChild;
469}
470
471static unsigned long vboxNetFltClassGet(struct Qdisc *sch, u32 classid)
472{
473 return 1;
474}
475
476static void vboxNetFltClassPut(struct Qdisc *sch, unsigned long arg)
477{
478}
479
480#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
481static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
482 struct rtattr **tca, unsigned long *arg)
483#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
484static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
485 struct nlattr **tca, unsigned long *arg)
486#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
487{
488 return -ENOSYS;
489}
490
491static int vboxNetFltClassDelete(struct Qdisc *sch, unsigned long arg)
492{
493 return -ENOSYS;
494}
495
496static void vboxNetFltClassWalk(struct Qdisc *sch, struct qdisc_walker *walker)
497{
498 if (!walker->stop) {
499 if (walker->count >= walker->skip)
500 if (walker->fn(sch, 1, walker) < 0) {
501 walker->stop = 1;
502 return;
503 }
504 walker->count++;
505 }
506}
507
508static struct tcf_proto **vboxNetFltClassFindTcf(struct Qdisc *sch, unsigned long cl)
509{
510 return NULL;
511}
512
513static int vboxNetFltClassDump(struct Qdisc *sch, unsigned long cl,
514 struct sk_buff *skb, struct tcmsg *tcm)
515{
516 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
517
518 if (cl != 1)
519 return -ENOENT;
520
521 tcm->tcm_handle |= TC_H_MIN(1);
522 tcm->tcm_info = pPriv->pChild->handle;
523
524 return 0;
525}
526
527
528static struct Qdisc_class_ops g_VBoxNetFltClassOps =
529{
530 .graft = vboxNetFltClassGraft,
531 .leaf = vboxNetFltClassLeaf,
532 .get = vboxNetFltClassGet,
533 .put = vboxNetFltClassPut,
534 .change = vboxNetFltClassChange,
535 .delete = vboxNetFltClassDelete,
536 .walk = vboxNetFltClassWalk,
537 .tcf_chain = vboxNetFltClassFindTcf,
538 .dump = vboxNetFltClassDump,
539};
540
541
542static struct Qdisc_ops g_VBoxNetFltQDiscOps = {
543 .cl_ops = &g_VBoxNetFltClassOps,
544 .id = "vboxnetflt",
545 .priv_size = sizeof(struct VBoxNetQDiscPriv),
546 .enqueue = vboxNetFltQdiscEnqueue,
547 .dequeue = vboxNetFltQdiscDequeue,
548#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
549 .requeue = vboxNetFltQdiscRequeue,
550#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
551 .peek = qdisc_peek_dequeued,
552#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
553 .drop = vboxNetFltQdiscDrop,
554 .init = vboxNetFltQdiscInit,
555 .reset = vboxNetFltQdiscReset,
556 .destroy = vboxNetFltQdiscDestroy,
557 .owner = THIS_MODULE
558};
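
/*
 * These ops are registered with the kernel's packet scheduler in
 * VBoxNetFltLinuxInit() below via register_qdisc(&g_VBoxNetFltQDiscOps).
 * Once registered, the qdisc can also be attached by hand (illustrative
 * command, assuming the interface is eth0):
 *
 *     tc qdisc add dev eth0 root vboxnetflt
 */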
559
560/*
561 * If our qdisc is already attached to the device (meaning the user
562 * installed it from the command line with the 'tc' command) we simply
563 * update the pointer to the vboxnetflt instance in the qdisc's private
564 * structure. Otherwise we need to take some additional steps:
565 * - Create our qdisc;
566 * - Save all references to qdiscs;
567 * - Replace our child with the first qdisc reference;
568 * - Replace all references so they point to our qdisc.
569 */
570static void vboxNetFltLinuxQdiscInstall(PVBOXNETFLTINS pThis, struct net_device *pDev)
571{
572#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
573 int i;
574#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
575 PVBOXNETQDISCPRIV pPriv;
576
577 struct Qdisc *pExisting = QDISC_GET(pDev);
578 if (strcmp(pExisting->ops->id, "vboxnetflt"))
579 {
580 /* The existing qdisc is different from ours, let's create a new one. */
581 struct Qdisc *pNew = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
582 &g_VBoxNetFltQDiscOps, TC_H_ROOT);
583 if (!pNew)
584 return; // TODO: Error?
585
586 if (!try_module_get(THIS_MODULE))
587 {
588 /*
589 * This may cause a memory leak but calling qdisc_destroy()
590 * is not an option as it will call module_put().
591 */
592 return;
593 }
594 pPriv = qdisc_priv(pNew);
595
596 qdisc_destroy(pPriv->pChild);
597 pPriv->pChild = QDISC_GET(pDev);
598 atomic_inc(&pPriv->pChild->refcnt);
599 /*
600 * There is no need to deactivate the device or acquire any locks
601 * prior to changing qdiscs, since we do not destroy the old qdisc.
602 * Atomic replacement of pointers is enough.
603 */
604 /*
605 * No need to change reference counters here as we merely move
606 * the pointer, and the reference counter of the newly allocated
607 * qdisc is already 1.
608 */
609#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
610 pPriv->ppSaved[0] = pDev->qdisc_sleeping;
611 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pNew);
612 ASMAtomicWritePtr(&pDev->qdisc, pNew);
613#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
614 for (i = 0; i < pDev->num_tx_queues; i++)
615 {
616 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
617
618 pPriv->ppSaved[i] = pQueue->qdisc_sleeping;
619 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pNew);
620 ASMAtomicWritePtr(&pQueue->qdisc, pNew);
621 if (i)
622 atomic_inc(&pNew->refcnt);
623 }
624 /* Newer kernels store root qdisc in netdev structure as well. */
625# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
626 pPriv->ppSaved[pDev->num_tx_queues] = pDev->qdisc;
627 ASMAtomicWritePtr(&pDev->qdisc, pNew);
628 atomic_inc(&pNew->refcnt);
629# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
630#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
631 /* Synch the queue len with our child */
632 pNew->q.qlen = pPriv->pChild->q.qlen;
633 }
634 else
635 {
636 /* We already have a vboxnetflt qdisc, let's use it. */
637 pPriv = qdisc_priv(pExisting);
638 }
639 ASMAtomicWritePtr(&pPriv->pVBoxNetFlt, pThis);
640 QDISC_LOG(("vboxNetFltLinuxInstallQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
641}
642
643static void vboxNetFltLinuxQdiscRemove(PVBOXNETFLTINS pThis, struct net_device *pDev)
644{
645#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
646 int i;
647#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
648 PVBOXNETQDISCPRIV pPriv;
649 struct Qdisc *pQdisc, *pChild;
650 if (!pDev)
651 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
652 if (!VALID_PTR(pDev))
653 {
654 printk("VBoxNetFlt: Failed to detach qdisc, invalid device pointer: %p\n",
655 pDev);
656 return; // TODO: Consider returning an error
657 }
658
659
660 pQdisc = QDISC_GET(pDev);
661 if (strcmp(pQdisc->ops->id, "vboxnetflt"))
662 {
663 /* Looks like the user has replaced our qdisc manually. */
664 printk("VBoxNetFlt: Failed to detach qdisc, wrong qdisc: %s\n",
665 pQdisc->ops->id);
666 return; // TODO: Consider returning an error
667 }
668
669 pPriv = qdisc_priv(pQdisc);
670 Assert(pPriv->pVBoxNetFlt == pThis);
671 ASMAtomicWriteNullPtr(&pPriv->pVBoxNetFlt);
672 pChild = ASMAtomicXchgPtrT(&pPriv->pChild, &noop_qdisc, struct Qdisc *);
673 qdisc_destroy(pChild); /* It won't be the last reference. */
674
675 QDISC_LOG(("vboxNetFltLinuxQdiscRemove: refcnt=%d num_tx_queues=%d\n",
676 atomic_read(&pQdisc->refcnt), pDev->num_tx_queues));
677#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
678 /* Play it safe, make sure the qdisc is not being used. */
679 if (pPriv->ppSaved[0])
680 {
681 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pPriv->ppSaved[0]);
682 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[0]);
683 pPriv->ppSaved[0] = NULL;
684 while (QDISC_IS_BUSY(pDev, pQdisc))
685 yield();
686 qdisc_destroy(pQdisc); /* Destroy reference */
687 }
688#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
689 for (i = 0; i < pDev->num_tx_queues; i++)
690 {
691 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
692 if (pPriv->ppSaved[i])
693 {
694 Assert(pQueue->qdisc_sleeping == pQdisc);
695 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pPriv->ppSaved[i]);
696 ASMAtomicWritePtr(&pQueue->qdisc, pPriv->ppSaved[i]);
697 pPriv->ppSaved[i] = NULL;
698 while (QDISC_IS_BUSY(pDev, pQdisc))
699 yield();
700 qdisc_destroy(pQdisc); /* Destroy reference */
701 }
702 }
703 /* Newer kernels store root qdisc in netdev structure as well. */
704#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
705 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[pDev->num_tx_queues]);
706 pPriv->ppSaved[pDev->num_tx_queues] = NULL;
707 while (QDISC_IS_BUSY(pDev, pQdisc))
708 yield();
709 qdisc_destroy(pQdisc); /* Destroy reference */
710#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
711#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
712
713 /*
714 * At this point all references to our qdisc should be gone
715 * unless the user had installed it manually.
716 */
717 QDISC_LOG(("vboxNetFltLinuxRemoveQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
718}
719
720#endif /* VBOXNETFLT_WITH_QDISC */
721
722
723/**
724 * Initialize module.
725 *
726 * @returns appropriate status code.
727 */
728static int __init VBoxNetFltLinuxInit(void)
729{
730 int rc;
731 /*
732 * Initialize IPRT.
733 */
734 rc = RTR0Init(0);
735 if (RT_SUCCESS(rc))
736 {
737 Log(("VBoxNetFltLinuxInit\n"));
738
739 /*
740 * Initialize the globals and connect to the support driver.
741 *
742 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
743 * for establishing the connection to the support driver.
744 */
745 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
746 rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
747 if (RT_SUCCESS(rc))
748 {
749#ifdef VBOXNETFLT_WITH_QDISC
750 /*memcpy(&g_VBoxNetFltQDiscOps, &pfifo_qdisc_ops, sizeof(g_VBoxNetFltQDiscOps));
751 strcpy(g_VBoxNetFltQDiscOps.id, "vboxnetflt");
752 g_VBoxNetFltQDiscOps.owner = THIS_MODULE;*/
753 rc = register_qdisc(&g_VBoxNetFltQDiscOps);
754 if (rc)
755 {
756 LogRel(("VBoxNetFlt: Failed to register qdisc: %d\n", rc));
757 return rc;
758 }
759#endif /* VBOXNETFLT_WITH_QDISC */
760 LogRel(("VBoxNetFlt: Successfully started.\n"));
761 return 0;
762 }
763
764 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
765 RTR0Term();
766 }
767 else
768 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
769
770 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
771 return -RTErrConvertToErrno(rc);
772}
773
774
775/**
776 * Unload the module.
777 *
778 * @todo We have to prevent this if we're busy!
779 */
780static void __exit VBoxNetFltLinuxUnload(void)
781{
782 int rc;
783 Log(("VBoxNetFltLinuxUnload\n"));
784 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
785
786#ifdef VBOXNETFLT_WITH_QDISC
787 unregister_qdisc(&g_VBoxNetFltQDiscOps);
788#endif /* VBOXNETFLT_WITH_QDISC */
789 /*
790 * Undo the work done during start (in reverse order).
791 */
792 rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
793 AssertRC(rc); NOREF(rc);
794
795 RTR0Term();
796
797 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
798
799 Log(("VBoxNetFltLinuxUnload - done\n"));
800}
801
802
803/**
804 * Experiment where we filter traffic from the host to the internal network
805 * before it reaches the NIC driver.
806 *
807 * The current code uses a very ugly hack and only works on kernels using the
808 * net_device_ops (>= 2.6.29). It has been shown to give us a
809 * performance boost of 60-100% though. So, we have to find some less hacky way
810 * of getting this job done eventually.
811 *
812 * #define VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
813 */
814#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
815
816/**
817 * The overridden net_device_ops of the device we're attached to.
818 *
819 * Requires Linux 2.6.29 or later.
820 *
821 * This is a very dirty hack that was created to explore how much we can improve
822 * the host to guest transfers by not CC'ing the NIC.
823 */
824typedef struct VBoxNetDeviceOpsOverride
825{
826 /** Our overridden ops. */
827 struct net_device_ops Ops;
828 /** Magic word. */
829 uint32_t u32Magic;
830 /** Pointer to the original ops. */
831 struct net_device_ops const *pOrgOps;
832 /** Pointer to the net filter instance. */
833 PVBOXNETFLTINS pVBoxNetFlt;
834 /** The number of filtered packets. */
835 uint64_t cFiltered;
836 /** The total number of packets. */
837 uint64_t cTotal;
838} VBOXNETDEVICEOPSOVERRIDE, *PVBOXNETDEVICEOPSOVERRIDE;
839/** VBOXNETDEVICEOPSOVERRIDE::u32Magic value. */
840#define VBOXNETDEVICEOPSOVERRIDE_MAGIC UINT32_C(0x00c0ffee)
841
842/**
843 * ndo_start_xmit wrapper that drops packets that shouldn't go to the wire
844 * because they belong on the internal network.
845 *
846 * @returns NETDEV_TX_XXX.
847 * @param pSkb The socket buffer to transmit.
848 * @param pDev The net device.
849 */
850static int vboxNetFltLinuxStartXmitFilter(struct sk_buff *pSkb, struct net_device *pDev)
851{
852 PVBOXNETDEVICEOPSOVERRIDE pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->netdev_ops;
853 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
854 PCRTNETETHERHDR pEtherHdr;
855 PINTNETTRUNKSWPORT pSwitchPort;
856 uint32_t cbHdrs;
857
858
859 /*
860 * Validate the override structure.
861 *
862 * Note! We're racing vboxNetFltLinuxUnhookDev here. If this was supposed
863 * to be production quality code, we would have to be much more
864 * careful here and avoid the race.
865 */
866 if ( !VALID_PTR(pOverride)
867 || pOverride->u32Magic != VBOXNETDEVICEOPSOVERRIDE_MAGIC
868 || !VALID_PTR(pOverride->pOrgOps))
869 {
870 printk("vboxNetFltLinuxStartXmitFilter: bad override %p\n", pOverride);
871 dev_kfree_skb(pSkb);
872 return NETDEV_TX_OK;
873 }
874 pOverride->cTotal++;
875
876 /*
877 * Do the filtering based on the default OUI of our virtual NICs.
878 *
879 * Note! In a real solution, we would ask the switch whether the
880 * destination MAC is 100% certain to be on the internal network and
881 * only then drop it.
882 */
883 cbHdrs = skb_headlen(pSkb);
884 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
885 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
886 if ( pEtherHdr
887 && VALID_PTR(pOverride->pVBoxNetFlt)
888 && (pSwitchPort = pOverride->pVBoxNetFlt->pSwitchPort) != NULL
889 && VALID_PTR(pSwitchPort)
890 && cbHdrs >= 6)
891 {
892 INTNETSWDECISION enmDecision;
893
894 /** @todo consider reference counting, etc. */
895 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
896 if (enmDecision == INTNETSWDECISION_INTNET)
897 {
898 dev_kfree_skb(pSkb);
899 pOverride->cFiltered++;
900 return NETDEV_TX_OK;
901 }
902 }
903
904 return pOverride->pOrgOps->ndo_start_xmit(pSkb, pDev);
905}
906
907/**
908 * Hooks the device ndo_start_xmit operation of the device.
909 *
910 * @param pThis The net filter instance.
911 * @param pDev The net device.
912 */
913static void vboxNetFltLinuxHookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
914{
915 PVBOXNETDEVICEOPSOVERRIDE pOverride;
916 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
917
918 pOverride = RTMemAlloc(sizeof(*pOverride));
919 if (!pOverride)
920 return;
921 pOverride->pOrgOps = pDev->netdev_ops;
922 pOverride->Ops = *pDev->netdev_ops;
923 pOverride->Ops.ndo_start_xmit = vboxNetFltLinuxStartXmitFilter;
924 pOverride->u32Magic = VBOXNETDEVICEOPSOVERRIDE_MAGIC;
925 pOverride->cTotal = 0;
926 pOverride->cFiltered = 0;
927 pOverride->pVBoxNetFlt = pThis;
928
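 /* Note: storing pOverride rather than &pOverride->Ops below relies on Ops
 being the first member of the override structure, so the two addresses
 coincide. */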
929 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp); /* (this isn't necessary, but so what) */
930 ASMAtomicWritePtr((void * volatile *)&pDev->netdev_ops, pOverride);
931 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
932}
933
934/**
935 * Undoes what vboxNetFltLinuxHookDev did.
936 *
937 * @param pThis The net filter instance.
938 * @param pDev The net device. Can be NULL, in which case
939 * we'll try retrieve it from @a pThis.
940 */
941static void vboxNetFltLinuxUnhookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
942{
943 PVBOXNETDEVICEOPSOVERRIDE pOverride;
944 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
945
946 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
947 if (!pDev)
948 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
949 if (VALID_PTR(pDev))
950 {
951 pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->netdev_ops;
952 if ( VALID_PTR(pOverride)
953 && pOverride->u32Magic == VBOXNETDEVICEOPSOVERRIDE_MAGIC
954 && VALID_PTR(pOverride->pOrgOps)
955 )
956 {
957 ASMAtomicWritePtr((void * volatile *)&pDev->netdev_ops, pOverride->pOrgOps);
958 ASMAtomicWriteU32(&pOverride->u32Magic, 0);
959 }
960 else
961 pOverride = NULL;
962 }
963 else
964 pOverride = NULL;
965 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
966
967 if (pOverride)
968 {
969 printk("vboxnetflt: dropped %llu out of %llu packets\n", pOverride->cFiltered, pOverride->cTotal);
970 RTMemFree(pOverride);
971 }
972}
973
974#endif /* VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT */
975
976
977/**
978 * Reads and retains the host interface handle.
979 *
980 * @returns The handle, NULL if detached.
981 * @param pThis The instance.
982 */
983DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
984{
985#if 0
986 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
987 struct net_device *pDev = NULL;
988
989 Log(("vboxNetFltLinuxRetainNetDev\n"));
990 /*
991 * Be careful here to avoid problems racing the detached callback.
992 */
993 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
994 if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
995 {
996 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
997 if (pDev)
998 {
999 dev_hold(pDev);
1000 Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1001 }
1002 }
1003 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1004
1005 Log(("vboxNetFltLinuxRetainNetDev - done\n"));
1006 return pDev;
1007#else
1008 return ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1009#endif
1010}
1011
1012
1013/**
1014 * Release the host interface handle previously retained
1015 * by vboxNetFltLinuxRetainNetDev.
1016 *
1017 * @param pThis The instance.
1018 * @param pDev The vboxNetFltLinuxRetainNetDev
1019 * return value, NULL is fine.
1020 */
1021DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
1022{
1023#if 0
1024 Log(("vboxNetFltLinuxReleaseNetDev\n"));
1025 NOREF(pThis);
1026 if (pDev)
1027 {
1028 dev_put(pDev);
1029 Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1030 }
1031 Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
1032#endif
1033}
1034
1035#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
1036#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))
1037
1038/**
1039 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
1040 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
1041 *
1042 * @returns true / false accordingly.
1043 * @param pBuf The sk_buff.
1044 */
1045DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
1046{
1047 return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
1048}
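
/*
 * Illustrative round trip (a sketch of the tagging scheme above, using the
 * names defined in this file): a buffer we inject is marked in the last four
 * bytes of its control block, and the packet handler later recognizes it:
 *
 *     VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);  // mark as ours
 *     ...
 *     if (vboxNetFltLinuxSkBufIsOur(pBuf))                 // seen again: drop
 *         dev_kfree_skb(pBuf);
 */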
1049
1050
1051/**
1052 * Internal worker that creates a Linux sk_buff for a
1053 * (scatter/)gather list.
1054 *
1055 * @returns Pointer to the sk_buff.
1056 * @param pThis The instance.
1057 * @param pSG The (scatter/)gather list.
1058 * @param fDstWire Set if the destination is the wire.
1059 */
1060static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
1061{
1062 struct sk_buff *pPkt;
1063 struct net_device *pDev;
1064 unsigned fGsoType = 0;
1065
1066 if (pSG->cbTotal == 0)
1067 {
1068 LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
1069 return NULL;
1070 }
1071
1072 /** @todo We should use fragments mapping the SG buffers with large packets.
1073 * 256 bytes seems to be a commonly used threshold for this. It
1074 * requires some nasty work on the intnet side though... */
1075 /*
1076 * Allocate a packet and copy over the data.
1077 */
1078 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1079 pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
1080 if (RT_UNLIKELY(!pPkt))
1081 {
1082 Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
1083 pSG->pvUserData = NULL;
1084 return NULL;
1085 }
1086 pPkt->dev = pDev;
1087 pPkt->ip_summed = CHECKSUM_NONE;
1088
1089 /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
1090 skb_reserve(pPkt, NET_IP_ALIGN);
1091
1092 /* Copy the segments. */
1093 skb_put(pPkt, pSG->cbTotal);
1094 IntNetSgRead(pSG, pPkt->data);
1095
1096#if defined(VBOXNETFLT_WITH_GSO_XMIT_WIRE) || defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
1097 /*
1098 * Setup GSO if used by this packet.
1099 */
1100 switch ((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type)
1101 {
1102 default:
1103 AssertMsgFailed(("%u (%s)\n", pSG->GsoCtx.u8Type, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type) ));
1104 /* fall thru */
1105 case PDMNETWORKGSOTYPE_INVALID:
1106 fGsoType = 0;
1107 break;
1108 case PDMNETWORKGSOTYPE_IPV4_TCP:
1109 fGsoType = SKB_GSO_TCPV4;
1110 break;
1111 case PDMNETWORKGSOTYPE_IPV4_UDP:
1112 fGsoType = SKB_GSO_UDP;
1113 break;
1114 case PDMNETWORKGSOTYPE_IPV6_TCP:
1115 fGsoType = SKB_GSO_TCPV6;
1116 break;
1117 }
1118 if (fGsoType)
1119 {
1120 struct skb_shared_info *pShInfo = skb_shinfo(pPkt);
1121
1122 pShInfo->gso_type = fGsoType | SKB_GSO_DODGY;
1123 pShInfo->gso_size = pSG->GsoCtx.cbMaxSeg;
1124 pShInfo->gso_segs = PDMNetGsoCalcSegmentCount(&pSG->GsoCtx, pSG->cbTotal);
1125
1126 /*
1127 * We need to set checksum fields even if the packet goes to the host
1128 * directly, as it may be immediately forwarded by the IP layer @bugref{5020}.
1129 */
1130 Assert(skb_headlen(pPkt) >= pSG->GsoCtx.cbHdrs);
1131 pPkt->ip_summed = CHECKSUM_PARTIAL;
1132# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1133 pPkt->csum_start = skb_headroom(pPkt) + pSG->GsoCtx.offHdr2;
1134 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1135 pPkt->csum_offset = RT_OFFSETOF(RTNETTCP, th_sum);
1136 else
1137 pPkt->csum_offset = RT_OFFSETOF(RTNETUDP, uh_sum);
1138# else
1139 pPkt->h.raw = pPkt->data + pSG->GsoCtx.offHdr2;
1140 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1141 pPkt->csum = RT_OFFSETOF(RTNETTCP, th_sum);
1142 else
1143 pPkt->csum = RT_OFFSETOF(RTNETUDP, uh_sum);
1144# endif
1145 if (!fDstWire)
1146 PDMNetGsoPrepForDirectUse(&pSG->GsoCtx, pPkt->data, pSG->cbTotal, PDMNETCSUMTYPE_PSEUDO);
1147 }
1148#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE || VBOXNETFLT_WITH_GSO_XMIT_HOST */
1149
1150 /*
1151 * Finish up the socket buffer.
1152 */
1153 pPkt->protocol = eth_type_trans(pPkt, pDev);
1154 if (fDstWire)
1155 {
1156 VBOX_SKB_RESET_NETWORK_HDR(pPkt);
1157
1158 /* Restore ethernet header back. */
1159 skb_push(pPkt, ETH_HLEN); /** @todo VLAN: +4 if VLAN? */
1160 VBOX_SKB_RESET_MAC_HDR(pPkt);
1161 }
1162 VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
1163
1164 return pPkt;
1165}
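
/*
 * Sketch of the typical call site (an assumption; the xmit code path lives
 * further down in this file): the internal network hands us an SG, we
 * convert it and transmit the result:
 *
 *     struct sk_buff *pPkt = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
 *     if (pPkt)
 *         dev_queue_xmit(pPkt);
 */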
1166
1167
1168/**
1169 * Initializes a SG list from an sk_buff.
1170 *
1171 * @returns Number of segments.
1172 * @param pThis The instance.
1173 * @param pBuf The sk_buff.
1174 * @param pSG The SG.
1176 * @param cSegs The number of segments allocated for the SG.
1177 * This should match the number in the sk_buff exactly!
1178 * @param fSrc The source of the frame.
1179 * @param pGsoCtx Pointer to the GSO context if it's a GSO
1180 * internal network frame. NULL if regular frame.
1181 */
1182DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG,
1183 unsigned cSegs, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1184{
1185 int i;
1186 NOREF(pThis);
1187
1188 Assert(!skb_shinfo(pBuf)->frag_list);
1189
1190 if (!pGsoCtx)
1191 IntNetSgInitTempSegs(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/);
1192 else
1193 IntNetSgInitTempSegsGso(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/, pGsoCtx);
1194
1195#ifdef VBOXNETFLT_SG_SUPPORT
1196 pSG->aSegs[0].cb = skb_headlen(pBuf);
1197 pSG->aSegs[0].pv = pBuf->data;
1198 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1199
1200 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
1201 {
1202 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
1203 pSG->aSegs[i+1].cb = pFrag->size;
1204 pSG->aSegs[i+1].pv = kmap(pFrag->page);
1205 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
1206 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
1207 }
1208 ++i;
1209
1210#else
1211 pSG->aSegs[0].cb = pBuf->len;
1212 pSG->aSegs[0].pv = pBuf->data;
1213 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1214 i = 1;
1215#endif
1216
1217 pSG->cSegsUsed = i;
1218
1219#ifdef PADD_RUNT_FRAMES_FROM_HOST
1220 /*
1221 * Add a trailer if the frame is too small.
1222 *
1223 * Since we're getting to the packet before it is framed, it has not
1224 * yet been padded. The current solution is to add a segment pointing
1225 * to a buffer containing all zeros and pray that works for all frames...
1226 */
1227 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
1228 {
1229 static uint8_t const s_abZero[128] = {0};
1230
1231 AssertReturnVoid(i < cSegs);
1232
1233 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
1234 pSG->aSegs[i].pv = (void *)&s_abZero[0];
1235 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
1236 pSG->cbTotal = 60;
1237 pSG->cSegsUsed++;
1238 Assert(i + 1 <= pSG->cSegsAlloc);
1239 }
1240#endif
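
/*
 * Illustrative example of the padding above: a minimal ARP request is 42
 * bytes (14-byte ethernet header + 28-byte ARP payload), so an 18-byte
 * all-zero trailer segment is appended to reach the 60-byte ethernet
 * minimum (frame size excluding the FCS).
 */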
1241
1242 Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
1243 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
1244 for (i = 0; i < pSG->cSegsUsed; i++)
1245 Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
1246 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
1247}
1248
1249/**
1250 * Packet handler; called by the networking core for each frame seen on the attached device.
1251 *
1252 * @returns 0 (the return value is ignored by the kernel).
1253 * @param pBuf The sk_buff; consumed or forwarded by this function.
1254 * @param pSkbDev The device the buffer was received on.
1255 * @param pPacketType Our packet_type structure, used to recover the instance.
1256 * @param pOrigDev The original device (kernels 2.6.14 and later only).
1257 */
1259#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
1260static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1261 struct net_device *pSkbDev,
1262 struct packet_type *pPacketType,
1263 struct net_device *pOrigDev)
1264#else
1265static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1266 struct net_device *pSkbDev,
1267 struct packet_type *pPacketType)
1268#endif
1269{
1270 PVBOXNETFLTINS pThis;
1271 struct net_device *pDev;
1272 LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
1273 pBuf, pSkbDev, pPacketType));
1274#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1275 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1276 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1277 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1278#else
1279 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1280 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1281#endif
1282 /*
1283 * Drop it immediately?
1284 */
1285 if (!pBuf)
1286 return 0;
1287
1288 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
1289 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1290 if (pThis->u.s.pDev != pSkbDev)
1291 {
1292 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
1293 return 0;
1294 }
1295
1296 Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
1297 if (vboxNetFltLinuxSkBufIsOur(pBuf))
1298 {
1299 Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
1300 dev_kfree_skb(pBuf);
1301 return 0;
1302 }
1303
1304#ifndef VBOXNETFLT_SG_SUPPORT
1305 {
1306 /*
1307 * Get rid of fragmented packets, they cause too much trouble.
1308 */
1309 struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
1310 kfree_skb(pBuf);
1311 if (!pCopy)
1312 {
1313 LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
1314 return 0;
1315 }
1316 pBuf = pCopy;
1317# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1318 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1319 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1320 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1321# else
1322 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1323 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1324# endif
1325 }
1326#endif
1327
1328#ifdef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1329 /* Forward it to the internal network. */
1330 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1331#else
1332 /* Add the packet to transmit queue and schedule the bottom half. */
1333 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
1334 schedule_work(&pThis->u.s.XmitTask);
1335 Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
1336 &pThis->u.s.XmitTask, pBuf));
1337#endif
1338
1339 /* It does not really matter what we return, it is ignored by the kernel. */
1340 return 0;
1341}
1342
1343/**
1344 * Calculate the number of INTNETSEG segments the socket buffer will need.
1345 *
1346 * @returns Segment count.
1347 * @param pBuf The socket buffer.
1348 */
1349DECLINLINE(unsigned) vboxNetFltLinuxCalcSGSegments(struct sk_buff *pBuf)
1350{
1351#ifdef VBOXNETFLT_SG_SUPPORT
1352 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
1353#else
1354 unsigned cSegs = 1;
1355#endif
1356#ifdef PADD_RUNT_FRAMES_FROM_HOST
1357 /* vboxNetFltLinuxSkBufToSG adds a padding segment if it's a runt. */
1358 if (pBuf->len < 60)
1359 cSegs++;
1360#endif
1361 return cSegs;
1362}
1363
1364/**
1365 * Destroy the intnet scatter / gather buffer created by
1366 * vboxNetFltLinuxSkBufToSG.
1367 */
1368static void vboxNetFltLinuxDestroySG(PINTNETSG pSG)
1369{
1370#ifdef VBOXNETFLT_SG_SUPPORT
1371 int i;
1372
1373 for (i = 0; i < pSG->cSegsUsed - 1; i++) /* segments after the linear part were kmapped */
1374 {
1375 printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
1376 kunmap(pSG->aSegs[i+1].pv);
1377 }
1378#endif
1379 NOREF(pSG);
1380}
1381
1382#ifdef LOG_ENABLED
1383/**
1384 * Logging helper.
1385 */
1386static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
1387{
1388 uint8_t *pInt, *pExt;
1389 static int iPacketNo = 1;
1390 iPacketNo += iIncrement;
1391 if (fEgress)
1392 {
1393 pExt = pSG->aSegs[0].pv;
1394 pInt = pExt + 6;
1395 }
1396 else
1397 {
1398 pInt = pSG->aSegs[0].pv;
1399 pExt = pInt + 6;
1400 }
1401 Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
1402 " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
1403 pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
1404 fEgress ? "-->" : "<--", pszWhere,
1405 pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
1406 pSG->cbTotal, iPacketNo));
1407 Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
1408}
1409#else
1410# define vboxNetFltDumpPacket(a, b, c, d) do {} while (0)
1411#endif
1412
1413#ifdef VBOXNETFLT_WITH_GSO_RECV
1414
1415/**
1416 * Worker for vboxNetFltLinuxForwardToIntNet that checks if we can forward a
1417 * GSO socket buffer without having to segment it.
1418 *
1419 * @returns true on success, false if needs segmenting.
1420 * @param pThis The net filter instance.
1421 * @param pSkb The GSO socket buffer.
1422 * @param fSrc The source.
1423 * @param pGsoCtx Where to return the GSO context on success.
1424 */
1425static bool vboxNetFltLinuxCanForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc,
1426 PPDMNETWORKGSO pGsoCtx)
1427{
1428 PDMNETWORKGSOTYPE enmGsoType;
1429 uint16_t uEtherType;
1430 unsigned int cbTransport;
1431 unsigned int offTransport;
1432 unsigned int cbTransportHdr;
1433 unsigned uProtocol;
1434 union
1435 {
1436 RTNETIPV4 IPv4;
1437 RTNETIPV6 IPv6;
1438 RTNETTCP Tcp;
1439 uint8_t ab[40];
1440 uint16_t au16[40/2];
1441 uint32_t au32[40/4];
1442 } Buf;
1443
1444 /*
1445 * Check the GSO properties of the socket buffer and make sure it fits.
1446 */
1447 /** @todo Figure out how to handle SKB_GSO_TCP_ECN! */
1448 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCPV6 | SKB_GSO_TCPV4) ))
1449 {
1450 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_type=%#x\n", skb_shinfo(pSkb)->gso_type));
1451 return false;
1452 }
1453 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_size < 1
1454 || pSkb->len > VBOX_MAX_GSO_SIZE ))
1455 {
1456 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_size=%#x skb_len=%#x (max=%#x)\n", skb_shinfo(pSkb)->gso_size, pSkb->len, VBOX_MAX_GSO_SIZE));
1457 return false;
1458 }
1459 /*
1460 * It is possible to receive GSO packets from the wire if GRO is enabled.
1461 */
1462 if (RT_UNLIKELY(fSrc & INTNETTRUNKDIR_WIRE))
1463 {
1464 Log5(("vboxNetFltLinuxCanForwardAsGso: fSrc=wire\n"));
1465#ifdef VBOXNETFLT_WITH_GRO
1466 /*
1467 * The packet came from the wire and the driver has already consumed
1468 * the MAC header. We need to restore it.
1469 */
1470 pSkb->mac_len = skb_network_header(pSkb) - skb_mac_header(pSkb);
1471 skb_push(pSkb, pSkb->mac_len);
1472 Log5(("vboxNetFltLinuxCanForwardAsGso: mac_len=%d data=%p mac_header=%p network_header=%p\n",
1473 pSkb->mac_len, pSkb->data, skb_mac_header(pSkb), skb_network_header(pSkb)));
1474#else /* !VBOXNETFLT_WITH_GRO */
1475 /* Older kernels didn't have GRO. */
1476 return false;
1477#endif /* !VBOXNETFLT_WITH_GRO */
1478 }
1479 else
1480 {
1481 /*
1482 * skb_gso_segment does the following. Do we need to do it as well?
1483 */
1484#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1485 skb_reset_mac_header(pSkb);
1486 pSkb->mac_len = pSkb->network_header - pSkb->mac_header;
1487#else
1488 pSkb->mac.raw = pSkb->data;
1489 pSkb->mac_len = pSkb->nh.raw - pSkb->data;
1490#endif
1491 }
1492
1493 /*
1494 * Switch on the ethertype.
1495 */
1496 uEtherType = pSkb->protocol;
1497 if ( uEtherType == RT_H2N_U16_C(RTNET_ETHERTYPE_VLAN)
1498 && pSkb->mac_len == sizeof(RTNETETHERHDR) + sizeof(uint32_t))
1499 {
1500 uint16_t const *puEtherType = skb_header_pointer(pSkb, sizeof(RTNETETHERHDR) + sizeof(uint16_t), sizeof(uint16_t), &Buf);
1501 if (puEtherType)
1502 uEtherType = *puEtherType;
1503 }
1504 switch (uEtherType)
1505 {
1506 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV4):
1507 {
1508 unsigned int cbHdr;
1509 PCRTNETIPV4 pIPv4 = (PCRTNETIPV4)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv4), &Buf);
1510 if (RT_UNLIKELY(!pIPv4))
1511 {
1512 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv4 hdr\n"));
1513 return false;
1514 }
1515
1516 cbHdr = pIPv4->ip_hl * 4;
1517 cbTransport = RT_N2H_U16(pIPv4->ip_len);
1518 if (RT_UNLIKELY( cbHdr < RTNETIPV4_MIN_LEN
1519 || cbHdr > cbTransport ))
1520 {
1521 Log5(("vboxNetFltLinuxCanForwardAsGso: invalid IPv4 lengths: ip_hl=%u ip_len=%u\n", pIPv4->ip_hl, RT_N2H_U16(pIPv4->ip_len)));
1522 return false;
1523 }
1524 cbTransport -= cbHdr;
1525 offTransport = pSkb->mac_len + cbHdr;
1526 uProtocol = pIPv4->ip_p;
1527 if (uProtocol == RTNETIPV4_PROT_TCP)
1528 enmGsoType = PDMNETWORKGSOTYPE_IPV4_TCP;
1529 else if (uProtocol == RTNETIPV4_PROT_UDP)
1530 enmGsoType = PDMNETWORKGSOTYPE_IPV4_UDP;
1531 else /** @todo IPv6: 4to6 tunneling */
1532 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1533 break;
1534 }
1535
1536 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV6):
1537 {
1538 PCRTNETIPV6 pIPv6 = (PCRTNETIPV6)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv6), &Buf);
1539 if (RT_UNLIKELY(!pIPv6))
1540 {
1541 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv6 hdr\n"));
1542 return false;
1543 }
1544
1545 cbTransport = RT_N2H_U16(pIPv6->ip6_plen);
1546 offTransport = pSkb->mac_len + sizeof(RTNETIPV6);
1547 uProtocol = pIPv6->ip6_nxt;
1548 /** @todo IPv6: Dig our way out of the other headers. */
1549 if (uProtocol == RTNETIPV4_PROT_TCP)
1550 enmGsoType = PDMNETWORKGSOTYPE_IPV6_TCP;
1551 else if (uProtocol == RTNETIPV4_PROT_UDP)
1552 enmGsoType = PDMNETWORKGSOTYPE_IPV6_UDP;
1553 else
1554 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1555 break;
1556 }
1557
1558 default:
1559 Log5(("vboxNetFltLinuxCanForwardAsGso: uEtherType=%#x\n", RT_H2N_U16(uEtherType)));
1560 return false;
1561 }
1562
1563 if (enmGsoType == PDMNETWORKGSOTYPE_INVALID)
1564 {
1565 Log5(("vboxNetFltLinuxCanForwardAsGso: Unsupported protocol %d\n", uProtocol));
1566 return false;
1567 }
1568
1569 if (RT_UNLIKELY( offTransport + cbTransport <= offTransport
1570 || offTransport + cbTransport > pSkb->len
1571 || cbTransport < (uProtocol == RTNETIPV4_PROT_TCP ? RTNETTCP_MIN_LEN : RTNETUDP_MIN_LEN)) )
1572 {
1573 Log5(("vboxNetFltLinuxCanForwardAsGso: Bad transport length; off=%#x + cb=%#x => %#x; skb_len=%#x (%s)\n",
1574 offTransport, cbTransport, offTransport + cbTransport, pSkb->len, PDMNetGsoTypeName(enmGsoType) ));
1575 return false;
1576 }
1577
1578 /*
1579 * Check the TCP/UDP bits.
1580 */
1581 if (uProtocol == RTNETIPV4_PROT_TCP)
1582 {
1583 PCRTNETTCP pTcp = (PCRTNETTCP)skb_header_pointer(pSkb, offTransport, sizeof(Buf.Tcp), &Buf);
1584 if (RT_UNLIKELY(!pTcp))
1585 {
1586 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access TCP hdr\n"));
1587 return false;
1588 }
1589
1590 cbTransportHdr = pTcp->th_off * 4;
1591 if (RT_UNLIKELY( cbTransportHdr < RTNETTCP_MIN_LEN
1592 || cbTransportHdr > cbTransport
1593 || offTransport + cbTransportHdr >= UINT8_MAX
1594 || offTransport + cbTransportHdr >= pSkb->len ))
1595 {
1596 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for TCP header; off=%#x cb=%#x skb_len=%#x\n", offTransport, cbTransportHdr, pSkb->len));
1597 return false;
1598 }
1599
1600 }
1601 else
1602 {
1603 Assert(uProtocol == RTNETIPV4_PROT_UDP);
1604 cbTransportHdr = sizeof(RTNETUDP);
1605 if (RT_UNLIKELY( offTransport + cbTransportHdr >= UINT8_MAX
1606 || offTransport + cbTransportHdr >= pSkb->len ))
1607 {
1608 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for UDP header; off=%#x skb_len=%#x\n", offTransport, pSkb->len));
1609 return false;
1610 }
1611 }
1612
1613 /*
1614 * We're good, init the GSO context.
1615 */
1616 pGsoCtx->u8Type = enmGsoType;
1617 pGsoCtx->cbHdrs = offTransport + cbTransportHdr;
1618 pGsoCtx->cbMaxSeg = skb_shinfo(pSkb)->gso_size;
1619 pGsoCtx->offHdr1 = pSkb->mac_len;
1620 pGsoCtx->offHdr2 = offTransport;
1621 pGsoCtx->au8Unused[0] = 0;
1622 pGsoCtx->au8Unused[1] = 0;
1623
1624 return true;
1625}
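
/*
 * Illustrative worked example for the context initialized above: a plain
 * TCP/IPv4 frame with a 14-byte ethernet header, a 20-byte IP header and a
 * 20-byte TCP header yields
 *
 *     offHdr1  = 14            (start of the IP header)
 *     offHdr2  = 14 + 20 = 34  (start of the TCP header)
 *     cbHdrs   = 34 + 20 = 54  (everything up to the payload)
 *     cbMaxSeg = gso_size      (the MSS the kernel used when coalescing)
 */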
1626
1627/**
1628 * Forward the socket buffer as a GSO internal network frame.
1629 *
1630 * @returns IPRT status code.
1631 * @param pThis The net filter instance.
1632 * @param pSkb The GSO socket buffer.
1633 * @param fSrc The source.
1634 * @param pGsoCtx The GSO context returned by vboxNetFltLinuxCanForwardAsGso.
1635 */
1636static int vboxNetFltLinuxForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1637{
1638 int rc;
1639 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pSkb);
1640 if (RT_LIKELY(cSegs <= MAX_SKB_FRAGS + 1))
1641 {
1642 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1643 if (RT_LIKELY(pSG))
1644 {
1645 vboxNetFltLinuxSkBufToSG(pThis, pSkb, pSG, cSegs, fSrc, pGsoCtx);
1646
1647 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1648 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1649
1650 vboxNetFltLinuxDestroySG(pSG);
1651 rc = VINF_SUCCESS;
1652 }
1653 else
1654 {
1655 Log(("VBoxNetFlt: Dropping the sk_buff (failure case).\n"));
1656 rc = VERR_NO_MEMORY;
1657 }
1658 }
1659 else
1660 {
1661 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1662 rc = VERR_INTERNAL_ERROR_3;
1663 }
1664
1665 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1666 dev_kfree_skb(pSkb);
1667 return rc;
1668}
1669
1670#endif /* VBOXNETFLT_WITH_GSO_RECV */
1671
1672/**
1673 * Worker for vboxNetFltLinuxForwardToIntNet.
1674 *
1675 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
1676 * @param pThis The net filter instance.
1677 * @param pBuf The socket buffer.
1678 * @param fSrc The source.
1679 */
1680static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
1681{
1682 int rc;
1683 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pBuf);
1684 if (cSegs <= MAX_SKB_FRAGS + 1)
1685 {
1686 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1687 if (RT_LIKELY(pSG))
1688 {
1689 if (fSrc & INTNETTRUNKDIR_WIRE)
1690 {
1691 /*
1692 * The packet came from the wire; the ethernet header was removed by the device driver.
1693 * Restore it.
1694 */
1695 skb_push(pBuf, ETH_HLEN);
1696 }
1697
1698 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc, NULL /*pGsoCtx*/);
1699
1700 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1701 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1702
1703 vboxNetFltLinuxDestroySG(pSG);
1704 rc = VINF_SUCCESS;
1705 }
1706 else
1707 {
1708 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
1709 rc = VERR_NO_MEMORY;
1710 }
1711 }
1712 else
1713 {
1714 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1715 rc = VERR_INTERNAL_ERROR_3;
1716 }
1717
1718 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1719 dev_kfree_skb(pBuf);
1720 return rc;
1721}
1722
1723/**
1724 * Forwards the socket buffer to the internal network.
1725 * @param pBuf The socket buffer. This is consumed by this function.
1726 */
1727static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
1728{
1729 uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;
1730
1731#ifdef VBOXNETFLT_WITH_GSO
1732 if (skb_is_gso(pBuf))
1733 {
1734 PDMNETWORKGSO GsoCtx;
1735 Log3(("vboxNetFltLinuxForwardToIntNet: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_segs=%u gso_type=%x frag_list=%p pkt_type=%x ip_summed=%d\n",
1736 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, pBuf->ip_summed));
1737# ifdef VBOXNETFLT_WITH_GSO_RECV
1738 if ( (skb_shinfo(pBuf)->gso_type & (SKB_GSO_UDP | SKB_GSO_TCPV6 | SKB_GSO_TCPV4))
1739 && vboxNetFltLinuxCanForwardAsGso(pThis, pBuf, fSrc, &GsoCtx) )
1740 vboxNetFltLinuxForwardAsGso(pThis, pBuf, fSrc, &GsoCtx);
1741 else
1742# endif
1743 {
1744 /* Need to segment the packet */
1745 struct sk_buff *pNext;
1746 struct sk_buff *pSegment = skb_gso_segment(pBuf, 0 /*supported features*/);
1747 if (IS_ERR(pSegment))
1748 {
1749 dev_kfree_skb(pBuf);
1750 LogRel(("VBoxNetFlt: Failed to segment a packet (%d).\n", PTR_ERR(pSegment)));
1751 return;
1752 }
1753
1754 for (; pSegment; pSegment = pNext)
1755 {
1756 Log3(("vboxNetFltLinuxForwardToIntNet: segment len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_segs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1757 pSegment->len, pSegment->data_len, pSegment->truesize, pSegment->next, skb_shinfo(pSegment)->nr_frags, skb_shinfo(pSegment)->gso_size, skb_shinfo(pSegment)->gso_segs, skb_shinfo(pSegment)->gso_type, skb_shinfo(pSegment)->frag_list, pSegment->pkt_type));
1758 pNext = pSegment->next;
1759 pSegment->next = NULL;
1760 vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
1761 }
1762 dev_kfree_skb(pBuf);
1763 }
1764 }
1765 else
1766#endif /* VBOXNETFLT_WITH_GSO */
1767 {
1768 if (pBuf->ip_summed == CHECKSUM_PARTIAL && pBuf->pkt_type == PACKET_OUTGOING)
1769 {
1770#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1771 /*
1772 * Try to work around the problem with CentOS 4.7 and 5.2 (2.6.9
1773 * and 2.6.18 kernels): they pass a wrong 'h' pointer down. We take
1774 * the IP header length from the header itself and reconstruct the
1775 * 'h' pointer to the TCP (or whatever) header.
1776 */
1777 unsigned char *tmp = pBuf->h.raw;
1778 if (pBuf->h.raw == pBuf->nh.raw && pBuf->protocol == htons(ETH_P_IP))
1779 pBuf->h.raw = pBuf->nh.raw + pBuf->nh.iph->ihl * 4;
1780#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1781 if (VBOX_SKB_CHECKSUM_HELP(pBuf))
1782 {
1783 LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
1784 dev_kfree_skb(pBuf);
1785 return;
1786 }
1787#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1788 /* Restore the original (wrong) pointer. */
1789 pBuf->h.raw = tmp;
1790#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1791 }
1792 vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
1793 }
1794}
1795
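/*
 * Aside: a condensed sketch of the software checksum fallback above.  For
 * skbs with ip_summed == CHECKSUM_PARTIAL the device was expected to fill in
 * the checksum, so it must be computed in software before the packet is
 * diverted elsewhere.
 */
#if 0
static int sketchFinishChecksum(struct sk_buff *pSkb)
{
    if (pSkb->ip_summed == CHECKSUM_PARTIAL)
        return VBOX_SKB_CHECKSUM_HELP(pSkb);    /* 0 on success, non-zero on failure */
    return 0;
}
#endif
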
1796#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1797/**
1798 * Work queue handler that forwards the socket buffers queued by
1799 * vboxNetFltLinuxPacketHandler to the internal network.
1800 *
1801 * @param pWork The work queue.
1802 */
1803# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1804static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
1805# else
1806static void vboxNetFltLinuxXmitTask(void *pWork)
1807# endif
1808{
1809 PVBOXNETFLTINS pThis = VBOX_FLT_XT_TO_INST(pWork);
1810 struct sk_buff *pBuf;
1811
1812 Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
1813
1814 /*
1815 * Active? Retain the instance and increment the busy counter.
1816 */
1817 if (vboxNetFltTryRetainBusyActive(pThis))
1818 {
1819 while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != NULL)
1820 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1821
1822 vboxNetFltRelease(pThis, true /* fBusy */);
1823 }
1824 else
1825 {
1826 /** @todo Shouldn't we just drop the packets here? There is little point in
1827 * making them accumulate when the VM is paused and it'll only waste
1828 * kernel memory anyway... Hmm, maybe wait a short while (2-5 secs)
1829 * before starting to drain the packets (goes for the intnet ring buf
1830 * too)? */
1831 }
1832}
1833#endif /* !VBOXNETFLT_LINUX_NO_XMIT_QUEUE */
1834
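/*
 * Aside: the producer half of this deferred-forwarding pattern boils down to
 * the two calls sketched below.  This is a simplified sketch, not the
 * driver's actual packet handler (which lives earlier in this file).
 */
#if 0
static void sketchQueueForXmitTask(struct sk_buff_head *pQueue, struct work_struct *pTask, struct sk_buff *pSkb)
{
    skb_queue_tail(pQueue, pSkb);   /* spinlock-protected FIFO append */
    schedule_work(pTask);           /* run the work item on the shared kernel workqueue */
}
#endif
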
1835/**
1836 * Reports the GSO capabilities of the hardware NIC.
1837 *
1838 * @param pThis The net filter instance. The caller holds a
1839 * reference to it.
1840 */
1841static void vboxNetFltLinuxReportNicGsoCapabilities(PVBOXNETFLTINS pThis)
1842{
1843#ifdef VBOXNETFLT_WITH_GSO_XMIT_WIRE
1844 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
1845 {
1846 struct net_device *pDev;
1847 PINTNETTRUNKSWPORT pSwitchPort;
1848 unsigned int fFeatures;
1849 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1850
1851 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1852
1853 pSwitchPort = pThis->pSwitchPort; /* this doesn't need to be here, but it does no harm. */
1854 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1855 if (pDev)
1856 fFeatures = pDev->features;
1857 else
1858 fFeatures = 0;
1859
1860 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1861
1862 if (pThis->pSwitchPort)
1863 {
1864 /* Set/update the GSO capabilities of the NIC. */
1865 uint32_t fGsoCapabilities = 0;
1866 if (fFeatures & NETIF_F_TSO)
1867 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP);
1868 if (fFeatures & NETIF_F_TSO6)
1869 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP);
1870# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
1871 if (fFeatures & NETIF_F_UFO)
1872 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP);
1873 if (fFeatures & NETIF_F_UFO)
1874 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP);
1875# endif
1876 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort, fGsoCapabilities, INTNETTRUNKDIR_WIRE);
1877 }
1878
1879 vboxNetFltRelease(pThis, true /*fBusy*/);
1880 }
1881#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE */
1882}
1883
1884/**
1885 * Helper that determines whether the host (ignoring us) is operating the
1886 * interface in promiscuous mode or not.
1887 */
1888static bool vboxNetFltLinuxPromiscuous(PVBOXNETFLTINS pThis)
1889{
1890 bool fRc = false;
1891 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
1892 if (pDev)
1893 {
1894 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
1895 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
1896 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
1897 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1898 }
1899 return fRc;
1900}
1901
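/*
 * Aside: dev->promiscuity is a reference count which each
 * dev_set_promiscuity(pDev, 1) increments.  The test above subtracts our own
 * contribution (0 or 1) to see whether anyone else holds the interface in
 * promiscuous mode.  A stand-alone sketch of the same arithmetic:
 */
#if 0
static bool sketchOthersArePromiscuous(unsigned cPromiscuity, bool fOurContribution)
{
    return (cPromiscuity - (fOurContribution ? 1 : 0)) != 0;
}
#endif
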
1902/**
1903 * Internal worker for vboxNetFltLinuxNotifierCallback.
1904 *
1905 * @returns VBox status code.
1906 * @param pThis The instance.
1907 * @param pDev The net_device structure of the interface to
1908 * attach to.
1909 */
1910static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
1911{
1912 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1913 LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));
1914
1915 /*
1916 * Retain and store the device.
1917 */
1918 dev_hold(pDev);
1919
1920 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1921 ASMAtomicUoWritePtr(&pThis->u.s.pDev, pDev);
1922 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1923
1924 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1925 Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
1926
1927 /* Get the mac address while we still have a valid net_device reference. */
1928 memcpy(&pThis->u.s.MacAddr, pDev->dev_addr, sizeof(pThis->u.s.MacAddr));
1929
1930 /*
1931 * Install a packet filter for this device with a protocol wildcard (ETH_P_ALL).
1932 */
1933 pThis->u.s.PacketType.type = __constant_htons(ETH_P_ALL);
1934 pThis->u.s.PacketType.dev = pDev;
1935 pThis->u.s.PacketType.func = vboxNetFltLinuxPacketHandler;
1936 dev_add_pack(&pThis->u.s.PacketType);
1937
1938#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
1939 vboxNetFltLinuxHookDev(pThis, pDev);
1940#endif
1941#ifdef VBOXNETFLT_WITH_QDISC
1942 vboxNetFltLinuxQdiscInstall(pThis, pDev);
1943#endif /* VBOXNETFLT_WITH_QDISC */
1944
1945 /*
1946 * Set indicators that require the spinlock. Be a bit paranoid about racing
1947 * the device notification handler.
1948 */
1949 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1950 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1951 if (pDev)
1952 {
1953 ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
1954 ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
1955 pDev = NULL; /* don't dereference it */
1956 }
1957 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1958 Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));
1959
1960 /*
1961 * If the above succeeded, report the GSO capabilities; if not, undo and
1962 * release the device.
1963 */
1964 if (!pDev)
1965 {
1966 Assert(pThis->pSwitchPort);
1967 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
1968 {
1969 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
1970 pThis->pSwitchPort->pfnReportMacAddress(pThis->pSwitchPort, &pThis->u.s.MacAddr);
1971 pThis->pSwitchPort->pfnReportPromiscuousMode(pThis->pSwitchPort, vboxNetFltLinuxPromiscuous(pThis));
1972 pThis->pSwitchPort->pfnReportNoPreemptDsts(pThis->pSwitchPort, INTNETTRUNKDIR_WIRE | INTNETTRUNKDIR_HOST);
1973 vboxNetFltRelease(pThis, true /*fBusy*/);
1974 }
1975 }
1976 else
1977 {
1978#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
1979 vboxNetFltLinuxUnhookDev(pThis, pDev);
1980#endif
1981#ifdef VBOXNETFLT_WITH_QDISC
1982 vboxNetFltLinuxQdiscRemove(pThis, pDev);
1983#endif /* VBOXNETFLT_WITH_QDISC */
1984 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1985 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
1986 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1987 dev_put(pDev);
1988 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1989 }
1990
1991 LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.MacAddr), &pThis->u.s.MacAddr));
1992 return VINF_SUCCESS;
1993}
1994
1995
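/*
 * Aside: a minimal, self-contained sketch of the dev_add_pack() tap
 * installed by the function above.  The names are hypothetical; with
 * ETH_P_ALL the handler sees every frame on the device it is bound to.
 * (Handler signature as on 2.6 kernels that pass the original device.)
 */
#if 0
static int sketchPacketHandler(struct sk_buff *pSkb, struct net_device *pDev,
                               struct packet_type *pPt, struct net_device *pOrigDev)
{
    dev_kfree_skb(pSkb);            /* a real handler would clone or consume the skb */
    return 0;
}

static struct packet_type g_SketchTap =
{
    .type = __constant_htons(ETH_P_ALL),
    .func = sketchPacketHandler,
};
/* dev_add_pack(&g_SketchTap); ... dev_remove_pack(&g_SketchTap); */
#endif
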
1996static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
1997{
1998 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1999
2000 Assert(!pThis->fDisconnectedFromHost);
2001
2002#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2003 vboxNetFltLinuxUnhookDev(pThis, pDev);
2004#endif
2005#ifdef VBOXNETFLT_WITH_QDISC
2006 vboxNetFltLinuxQdiscRemove(pThis, pDev);
2007#endif /* VBOXNETFLT_WITH_QDISC */
2008
2009 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2010 ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
2011 ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
2012 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2013 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2014
2015 dev_remove_pack(&pThis->u.s.PacketType);
2016#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2017 skb_queue_purge(&pThis->u.s.XmitQueue);
2018#endif
2019 Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2020 Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
2021 dev_put(pDev);
2022
2023 return NOTIFY_OK;
2024}
2025
2026static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
2027{
2028 /* Check if we are not suspended and promiscuous mode has not been set. */
2029 if ( pThis->enmTrunkState == INTNETTRUNKIFSTATE_ACTIVE
2030 && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2031 {
2032 /* Note that there is no need for locking as the kernel already holds the lock. */
2033 dev_set_promiscuity(pDev, 1);
2034 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
2035 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2036 }
2037 else
2038 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2039 return NOTIFY_OK;
2040}
2041
2042static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
2043{
2044 /* Undo promiscuous mode if we have set it. */
2045 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2046 {
2047 /* Note that there is no need for locking as the kernel already holds the lock. */
2048 dev_set_promiscuity(pDev, -1);
2049 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
2050 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2051 }
2052 else
2053 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2054 return NOTIFY_OK;
2055}
2056
2057#ifdef LOG_ENABLED
2058/** Stringify the NETDEV_XXX constants. */
2059static const char *vboxNetFltLinuxGetNetDevEventName(unsigned long ulEventType)
2060{
2061 const char *pszEvent = "NETDEV_<unknown>";
2062 switch (ulEventType)
2063 {
2064 case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
2065 case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
2066 case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
2067 case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
2068 case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
2069 case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
2070 case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
2071 case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
2072 case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
2073 case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
2074# ifdef NETDEV_FEAT_CHANGE
2075 case NETDEV_FEAT_CHANGE: pszEvent = "NETDEV_FEAT_CHANGE"; break;
2076# endif
2077 }
2078 return pszEvent;
2079}
2080#endif /* LOG_ENABLED */
2081
2082/**
2083 * Callback for listening to netdevice events.
2084 *
2085 * This handles rediscovery, clean-up on unregistration, promiscuity on
2086 * up/down, and GSO feature changes reported by ethtool.
2087 *
2088 * @returns NOTIFY_OK
2089 * @param self Pointer to our notifier registration block.
2090 * @param ulEventType The event.
2091 * @param ptr Event specific, but it is usually the device it
2092 * relates to.
2093 */
2094static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
2096{
2097 PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);
2098 struct net_device *pDev = (struct net_device *)ptr;
2099 int rc = NOTIFY_OK;
2100
2101 Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2102 vboxNetFltLinuxGetNetDevEventName(ulEventType), ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
2103 if ( ulEventType == NETDEV_REGISTER
2104 && !strcmp(pDev->name, pThis->szName))
2105 {
2106 vboxNetFltLinuxAttachToInterface(pThis, pDev);
2107 }
2108 else
2109 {
2110 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2111 if (pDev == ptr)
2112 {
2113 switch (ulEventType)
2114 {
2115 case NETDEV_UNREGISTER:
2116 rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
2117 break;
2118 case NETDEV_UP:
2119 rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
2120 break;
2121 case NETDEV_GOING_DOWN:
2122 rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
2123 break;
2124 case NETDEV_CHANGENAME:
2125 break;
2126#ifdef NETDEV_FEAT_CHANGE
2127 case NETDEV_FEAT_CHANGE:
2128 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2129 break;
2130#endif
2131 }
2132 }
2133 }
2134
2135 return rc;
2136}
2137
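/*
 * Aside: VBOX_FLT_NB_TO_INST() used above is the standard container_of()
 * idiom: the notifier_block is embedded in VBOXNETFLTINS, so the instance
 * can be recovered from the member pointer the kernel hands back.  A generic
 * sketch with illustrative names:
 */
#if 0
struct SketchOuter
{
    int                   iPayload;
    struct notifier_block Nb;       /* the member registered with the kernel */
};

static struct SketchOuter *sketchOuterFromNb(struct notifier_block *pNb)
{
    return container_of(pNb, struct SketchOuter, Nb);
}
#endif
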
2138bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
2139{
2140 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
2141}
2142
2143int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, void *pvIfData, PINTNETSG pSG, uint32_t fDst)
2144{
2145 struct net_device * pDev;
2146 int err;
2147 int rc = VINF_SUCCESS;
2148 NOREF(pvIfData);
2149
2150 LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));
2151
2152 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2153 if (pDev)
2154 {
2155 /*
2156 * Create a sk_buff for the gather list and push it onto the wire.
2157 */
2158 if (fDst & INTNETTRUNKDIR_WIRE)
2159 {
2160 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
2161 if (pBuf)
2162 {
2163 vboxNetFltDumpPacket(pSG, true, "wire", 1);
2164 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2165 Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
2166 err = dev_queue_xmit(pBuf);
2167 if (err)
2168 rc = RTErrConvertFromErrno(err);
2169 }
2170 else
2171 rc = VERR_NO_MEMORY;
2172 }
2173
2174 /*
2175 * Create a sk_buff for the gather list and push it onto the host stack.
2176 */
2177 if (fDst & INTNETTRUNKDIR_HOST)
2178 {
2179 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
2180 if (pBuf)
2181 {
2182 vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
2183 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2184 Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
2185 err = netif_rx_ni(pBuf);
2186 if (err)
2187 rc = RTErrConvertFromErrno(err);
2188 }
2189 else
2190 rc = VERR_NO_MEMORY;
2191 }
2192
2193 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2194 }
2195
2196 return rc;
2197}
2198
2199
2200void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
2201{
2202 struct net_device * pDev;
2203
2204 LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
2205 pThis, pThis->szName, fActive?"true":"false",
2206 pThis->fDisablePromiscuous?"true":"false"));
2207
2208 if (pThis->fDisablePromiscuous)
2209 return;
2210
2211 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2212 if (pDev)
2213 {
2214 /*
2215 * This API is a bit weird; the best reference is the code.
2216 *
2217 * Also, we have a bit of a race condition wrt the maintenance of the
2218 * host interface's promiscuity for vboxNetFltPortOsIsPromiscuous.
2219 */
2220#ifdef LOG_ENABLED
2221 u_int16_t fIf;
2222 unsigned const cPromiscBefore = pDev->promiscuity;
2223#endif
2224 if (fActive)
2225 {
2226 Assert(!pThis->u.s.fPromiscuousSet);
2227
2228 rtnl_lock();
2229 dev_set_promiscuity(pDev, 1);
2230 rtnl_unlock();
2231 pThis->u.s.fPromiscuousSet = true;
2232 Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2233 }
2234 else
2235 {
2236 if (pThis->u.s.fPromiscuousSet)
2237 {
2238 rtnl_lock();
2239 dev_set_promiscuity(pDev, -1);
2240 rtnl_unlock();
2241 Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2242 }
2243 pThis->u.s.fPromiscuousSet = false;
2244
2245#ifdef LOG_ENABLED
2246 fIf = dev_get_flags(pDev);
2247 Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, pDev->promiscuity));
2248#endif
2249 }
2250
2251 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2252 }
2253}
2254
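/*
 * Aside: a condensed sketch of the locked promiscuity toggle used above.
 * Outside of notifier callbacks (which already run with the lock held), the
 * rtnl lock must be taken around dev_set_promiscuity(); the +/-1 merely
 * adjusts the interface's promiscuity reference count.
 */
#if 0
static void sketchSetPromiscuous(struct net_device *pDev, bool fOn)
{
    rtnl_lock();
    dev_set_promiscuity(pDev, fOn ? 1 : -1);
    rtnl_unlock();
}
#endif
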
2255
2256int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
2257{
2258#ifdef VBOXNETFLT_WITH_QDISC
2259 vboxNetFltLinuxQdiscRemove(pThis, NULL);
2260#endif /* VBOXNETFLT_WITH_QDISC */
2261 /*
2262 * Remove the packet handler when we get disconnected from the internal switch,
2263 * as we don't want the handler to forward packets to a disconnected switch.
2264 */
2265 dev_remove_pack(&pThis->u.s.PacketType);
2266 return VINF_SUCCESS;
2267}
2268
2269
2270int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
2271{
2272 /*
2273 * Report the GSO capabilities of the host and device (if connected).
2274 * Note! No need to mark ourselves busy here.
2275 */
2276 /** @todo duplicate work here now? Attach */
2277#if defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
2278 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort,
2279 0
2280 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP)
2281 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP)
2282# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
2283 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP)
2284 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP)
2285# endif
2286 , INTNETTRUNKDIR_HOST);
2287
2288#endif
2289 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2290
2291 return VINF_SUCCESS;
2292}
2293
2294
2295void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
2296{
2297 struct net_device *pDev;
2298 bool fRegistered;
2299 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2300
2301#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2302 vboxNetFltLinuxUnhookDev(pThis, NULL);
2303#endif
2304
2305 /** @todo This code may race vboxNetFltLinuxUnregisterDevice (very very
2306 * unlikely, but none the less). Since it doesn't actually update the
2307 * state (just reads it), it is likely to panic in some interesting
2308 * ways. */
2309
2310 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2311 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2312 fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
2313 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2314
2315 if (fRegistered)
2316 {
2317#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2318 skb_queue_purge(&pThis->u.s.XmitQueue);
2319#endif
2320 Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2321 Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
2322 dev_put(pDev);
2323 }
2324 Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
2325 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2326 module_put(THIS_MODULE);
2327}
2328
2329
2330int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
2331{
2332 int err;
2333 NOREF(pvContext);
2334
2335 pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
2336 err = register_netdevice_notifier(&pThis->u.s.Notifier);
2337 if (err)
2338 return VERR_INTNET_FLT_IF_FAILED;
2339 if (!pThis->u.s.fRegistered)
2340 {
2341 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2342 LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
2343 return VERR_INTNET_FLT_IF_NOT_FOUND;
2344 }
2345
2346 Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
2347 if ( pThis->fDisconnectedFromHost
2348 || !try_module_get(THIS_MODULE))
2349 return VERR_INTNET_FLT_IF_FAILED;
2350
2351 return VINF_SUCCESS;
2352}
2353
2354int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
2355{
2356 /*
2357 * Init the linux specific members.
2358 */
2359 pThis->u.s.pDev = NULL;
2360 pThis->u.s.fRegistered = false;
2361 pThis->u.s.fPromiscuousSet = false;
2362 memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
2363#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2364 skb_queue_head_init(&pThis->u.s.XmitQueue);
2365# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
2366 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
2367# else
2368 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
2369# endif
2370#endif
2371
2372 return VINF_SUCCESS;
2373}
2374
2375
2376void vboxNetFltPortOsNotifyMacAddress(PVBOXNETFLTINS pThis, void *pvIfData, PCRTMAC pMac)
2377{
2378 NOREF(pThis); NOREF(pvIfData); NOREF(pMac);
2379}
2380
2381
2382int vboxNetFltPortOsConnectInterface(PVBOXNETFLTINS pThis, void *pvIf, void **pvIfData)
2383{
2384 /* Nothing to do */
2385 NOREF(pThis); NOREF(pvIf); NOREF(pvIfData);
2386 return VINF_SUCCESS;
2387}
2388
2389
2390int vboxNetFltPortOsDisconnectInterface(PVBOXNETFLTINS pThis, void *pvIfData)
2391{
2392 /* Nothing to do */
2393 NOREF(pThis); NOREF(pvIfData);
2394 return VINF_SUCCESS;
2395}
2396