VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@30771

Last change on this file since 30771 was 30771, checked in by vboxsync, 15 years ago

vboxnetflt: RHEL 5 GRO fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 81.5 KB
1/* $Id: VBoxNetFlt-linux.c 30771 2010-07-09 17:26:55Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
22#define VBOXNETFLT_LINUX_NO_XMIT_QUEUE
23#include "the-linux-kernel.h"
24#include "version-generated.h"
25#include "product-generated.h"
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/rtnetlink.h>
29#include <linux/miscdevice.h>
30#include <linux/ip.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <VBox/intnetinline.h>
35#include <VBox/pdmnetinline.h>
36#include <VBox/param.h>
37#include <iprt/alloca.h>
38#include <iprt/assert.h>
39#include <iprt/spinlock.h>
40#include <iprt/semaphore.h>
41#include <iprt/initterm.h>
42#include <iprt/process.h>
43#include <iprt/mem.h>
44#include <iprt/net.h>
45#include <iprt/log.h>
46#include <iprt/mp.h>
47#include <iprt/mem.h>
48#include <iprt/time.h>
49
50#define VBOXNETFLT_OS_SPECFIC 1
51#include "../VBoxNetFltInternal.h"
52
53/*
54 * Uncomment the following line to enable qdisc support.
55 */
56//#define VBOXNETFLT_WITH_QDISC
57#ifdef VBOXNETFLT_WITH_QDISC
58#include <net/pkt_sched.h>
59#endif /* VBOXNETFLT_WITH_QDISC */
60
61
62/*******************************************************************************
63* Defined Constants And Macros *
64*******************************************************************************/
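/* The *_TO_INST macros below recover the owning VBOXNETFLTINS instance from a pointer
 * to one of its embedded members; RT_FROM_MEMBER is the IPRT analogue of container_of. */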
65#define VBOX_FLT_NB_TO_INST(pNB) RT_FROM_MEMBER(pNB, VBOXNETFLTINS, u.s.Notifier)
66#define VBOX_FLT_PT_TO_INST(pPT) RT_FROM_MEMBER(pPT, VBOXNETFLTINS, u.s.PacketType)
67#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
68# define VBOX_FLT_XT_TO_INST(pXT) RT_FROM_MEMBER(pXT, VBOXNETFLTINS, u.s.XmitTask)
69#endif
70
71#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
72# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
73# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
74#else
75# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
76# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
77#endif
78
79#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
80# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
81#else
82# define CHECKSUM_PARTIAL CHECKSUM_HW
83# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
84# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
85# else
86# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
87# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
88# else
89# define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
90# endif
91# endif
92#endif
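/* The VBOX_SKB_* wrappers above absorb kernel API changes: the skb header accessors were
 * introduced in 2.6.22 and the signature of skb_checksum_help() changed several times, so
 * the rest of this file can use a single spelling regardless of the kernel version. */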
93
94#ifndef NET_IP_ALIGN
95# define NET_IP_ALIGN 2
96#endif
97
98#if 0
99/** Create scatter / gather segments for fragments. When not used, we will
100 * linearize the socket buffer before creating the internal networking SG. */
101# define VBOXNETFLT_SG_SUPPORT 1
102#endif
103
104#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
105/** Indicates that the linux kernel may send us GSO frames. */
106# define VBOXNETFLT_WITH_GSO 1
107
108/** This enables or disables the transmission of GSO frames from the internal
109 * network to the host. */
110# define VBOXNETFLT_WITH_GSO_XMIT_HOST 1
111
112# if 0 /** @todo This is currently disabled because it causes a performance loss of 5-10%. */
113/** This enables or disables the transmission of GSO frames from the internal
114 * network to the wire. */
115# define VBOXNETFLT_WITH_GSO_XMIT_WIRE 1
116# endif
117
118/** This enables or disables the forwarding/flooding of GSO frames from the host
119 * to the internal network. */
120# define VBOXNETFLT_WITH_GSO_RECV 1
121
122#endif
123
124#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
125/** This enables or disables handling of GSO frames coming from the wire (GRO). */
126# define VBOXNETFLT_WITH_GRO 1
127#endif
128/*
129 * GRO support was backported to RHEL 5.4
130 */
131#ifdef RHEL_RELEASE_CODE
132# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)
133# define VBOXNETFLT_WITH_GRO 1
134# endif
135#endif
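/* RHEL_RELEASE_CODE is only defined by Red Hat kernel headers, hence the separate check
 * for the GRO backport in RHEL 5.4. */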
136
137/*******************************************************************************
138* Internal Functions *
139*******************************************************************************/
140static int VBoxNetFltLinuxInit(void);
141static void VBoxNetFltLinuxUnload(void);
142static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf);
143
144
145/*******************************************************************************
146* Global Variables *
147*******************************************************************************/
148/**
149 * The (common) global data.
150 */
151static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
152
153module_init(VBoxNetFltLinuxInit);
154module_exit(VBoxNetFltLinuxUnload);
155
156MODULE_AUTHOR(VBOX_VENDOR);
157MODULE_DESCRIPTION(VBOX_PRODUCT " Network Filter Driver");
158MODULE_LICENSE("GPL");
159#ifdef MODULE_VERSION
160MODULE_VERSION(VBOX_VERSION_STRING " (" RT_XSTR(INTNETTRUNKIFPORT_VERSION) ")");
161#endif
162
163
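/* Kernels older than 2.6.12 have no dev_get_flags(); provide a local replacement for the
 * logging code below. */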
164#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) && defined(LOG_ENABLED)
165unsigned dev_get_flags(const struct net_device *dev)
166{
167 unsigned flags;
168
169 flags = (dev->flags & ~(IFF_PROMISC |
170 IFF_ALLMULTI |
171 IFF_RUNNING)) |
172 (dev->gflags & (IFF_PROMISC |
173 IFF_ALLMULTI));
174
175 if (netif_running(dev) && netif_carrier_ok(dev))
176 flags |= IFF_RUNNING;
177
178 return flags;
179}
180#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
181
182
183#ifdef VBOXNETFLT_WITH_QDISC
184//#define QDISC_LOG(x) printk x
185#define QDISC_LOG(x) do { } while (0)
186
187#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
188#define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops)
189#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
190#define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops, parent)
191#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
192#define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, queue, ops, parent)
193#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
194
195#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
196#define qdisc_dev(qdisc) (qdisc->dev)
197#define qdisc_pkt_len(skb) (skb->len)
198#define QDISC_GET(dev) (dev->qdisc_sleeping)
199#else
200#define QDISC_GET(dev) (netdev_get_tx_queue(dev, 0)->qdisc_sleeping)
201#endif
202
203#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
204#define QDISC_SAVED_NUM(dev) 1
205#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
206#define QDISC_SAVED_NUM(dev) dev->num_tx_queues
207#else
208#define QDISC_SAVED_NUM(dev) dev->num_tx_queues+1
209#endif
210
211#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
212#define QDISC_IS_BUSY(dev, qdisc) test_bit(__LINK_STATE_SCHED, &dev->state)
213#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
214#define QDISC_IS_BUSY(dev, qdisc) (test_bit(__QDISC_STATE_RUNNING, &qdisc->state) || \
215 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
216#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
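/* The QDISC_* macros above absorb the 2.6.27 switch to multi-queue devices: where the
 * active qdisc is stored, how many qdiscs have to be saved and restored, and how to tell
 * whether a qdisc is still scheduled or running. */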
217
218struct VBoxNetQDiscPriv
219{
220 /** Pointer to the single child qdisc. */
221 struct Qdisc *pChild;
222 /*
223 * Technically it is possible to have different qdiscs for different TX
224 * queues so we have to save them all.
225 */
226 /** Pointer to the array of saved qdiscs. */
227 struct Qdisc **ppSaved;
228 /** Pointer to the net filter instance. */
229 PVBOXNETFLTINS pVBoxNetFlt;
230};
231typedef struct VBoxNetQDiscPriv *PVBOXNETQDISCPRIV;
232
233//#define VBOXNETFLT_QDISC_ENQUEUE
234static int vboxNetFltQdiscEnqueue(struct sk_buff *skb, struct Qdisc *sch)
235{
236 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
237 int rc;
238
239#ifdef VBOXNETFLT_QDISC_ENQUEUE
240 if (VALID_PTR(pPriv->pVBoxNetFlt))
241 {
242 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
243 PCRTNETETHERHDR pEtherHdr;
244 PINTNETTRUNKSWPORT pSwitchPort;
245 uint32_t cbHdrs = skb_headlen(skb);
246
247 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
248 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(skb, 0, cbHdrs, &abHdrBuf[0]);
249 if ( pEtherHdr
250 && (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) != NULL
251 && VALID_PTR(pSwitchPort)
252 && cbHdrs >= 6)
253 {
254 /** @todo consider reference counting, etc. */
255 INTNETSWDECISION enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
256 if (enmDecision == INTNETSWDECISION_INTNET)
257 {
258 struct sk_buff *pBuf = skb_copy(skb, GFP_ATOMIC);
259 pBuf->pkt_type = PACKET_OUTGOING;
260 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
261 qdisc_drop(skb, sch);
262 ++sch->bstats.packets;
263 sch->bstats.bytes += qdisc_pkt_len(skb);
264 return NET_XMIT_SUCCESS;
265 }
266 }
267 }
268#endif /* VBOXNETFLT_QDISC_ENQUEUE */
269 rc = pPriv->pChild->enqueue(skb, pPriv->pChild);
270 if (rc == NET_XMIT_SUCCESS)
271 {
272 ++sch->q.qlen;
273 ++sch->bstats.packets;
274 sch->bstats.bytes += qdisc_pkt_len(skb);
275 }
276 else
277 ++sch->qstats.drops;
278 return rc;
279}
280
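/* Without VBOXNETFLT_QDISC_ENQUEUE the interception happens at dequeue time: frames the
 * switch decides belong to the internal network are copied there and dropped from the
 * wire path, while everything else is handed back to the kernel for transmission. */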
281static struct sk_buff *vboxNetFltQdiscDequeue(struct Qdisc *sch)
282{
283 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
284#ifdef VBOXNETFLT_QDISC_ENQUEUE
285 --sch->q.qlen;
286 return pPriv->pChild->dequeue(pPriv->pChild);
287#else /* VBOXNETFLT_QDISC_ENQUEUE */
288 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
289 PCRTNETETHERHDR pEtherHdr;
290 PINTNETTRUNKSWPORT pSwitchPort;
291 struct sk_buff *pSkb;
292
293 QDISC_LOG(("vboxNetFltDequeue: Enter pThis=%p\n", pPriv->pVBoxNetFlt));
294
295 while ((pSkb = pPriv->pChild->dequeue(pPriv->pChild)) != NULL)
296 {
297 struct sk_buff *pBuf;
298 INTNETSWDECISION enmDecision;
299 uint32_t cbHdrs;
300
301 --sch->q.qlen;
302
303 if (!VALID_PTR(pPriv->pVBoxNetFlt))
304 break;
305
306 cbHdrs = skb_headlen(pSkb);
307 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
308 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
309 if ( !pEtherHdr
310 || (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) == NULL
311 || !VALID_PTR(pSwitchPort)
312 || cbHdrs < 6)
313 break;
314
315 /** @todo consider reference counting, etc. */
316 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
317 if (enmDecision != INTNETSWDECISION_INTNET)
318 break;
319
320 pBuf = skb_copy(pSkb, GFP_ATOMIC);
321 pBuf->pkt_type = PACKET_OUTGOING;
322 QDISC_LOG(("vboxNetFltDequeue: pThis=%p\n", pPriv->pVBoxNetFlt));
323 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
324 qdisc_drop(pSkb, sch);
325 QDISC_LOG(("VBoxNetFlt: Packet for %02x:%02x:%02x:%02x:%02x:%02x dropped\n",
326 pSkb->data[0], pSkb->data[1], pSkb->data[2],
327 pSkb->data[3], pSkb->data[4], pSkb->data[5]));
328 }
329
330 return pSkb;
331#endif /* VBOXNETFLT_QDISC_ENQUEUE */
332}
333
334#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
335static int vboxNetFltQdiscRequeue(struct sk_buff *skb, struct Qdisc *sch)
336{
337 int rc;
338 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
339
340 rc = pPriv->pChild->ops->requeue(skb, pPriv->pChild);
341 if (rc == 0)
342 {
343 sch->q.qlen++;
344 sch->qstats.requeues++;
345 }
346
347 return rc;
348}
349#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
350
351static unsigned int vboxNetFltQdiscDrop(struct Qdisc *sch)
352{
353 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
354
355 ++sch->qstats.drops;
356 --sch->q.qlen;
357 if (pPriv->pChild->ops->drop)
358 return pPriv->pChild->ops->drop(pPriv->pChild);
359
360 return 0;
361}
362
363#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
364static int vboxNetFltQdiscInit(struct Qdisc *sch, struct rtattr *opt)
365#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
366static int vboxNetFltQdiscInit(struct Qdisc *sch, struct nlattr *opt)
367#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
368{
369 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
370 struct net_device *pDev = qdisc_dev(sch);
371
372 pPriv->pVBoxNetFlt = NULL;
373
374 pPriv->ppSaved = kcalloc(QDISC_SAVED_NUM(pDev), sizeof(pPriv->ppSaved[0]),
375 GFP_KERNEL);
376 if (!pPriv->ppSaved)
377 return -ENOMEM;
378
379 pPriv->pChild = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
380 &pfifo_qdisc_ops,
381 TC_H_MAKE(TC_H_MAJ(sch->handle),
382 TC_H_MIN(1)));
383 if (!pPriv->pChild)
384 {
385 kfree(pPriv->ppSaved);
386 pPriv->ppSaved = NULL;
387 return -ENOMEM;
388 }
389
390 return 0;
391}
392
393static void vboxNetFltQdiscReset(struct Qdisc *sch)
394{
395 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
396
397 qdisc_reset(pPriv->pChild);
398 sch->q.qlen = 0;
399 sch->qstats.backlog = 0;
400}
401
402static void vboxNetFltQdiscDestroy(struct Qdisc* sch)
403{
404 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
405 struct net_device *pDev = qdisc_dev(sch);
406
407 qdisc_destroy(pPriv->pChild);
408 pPriv->pChild = NULL;
409
410 if (pPriv->ppSaved)
411 {
412 int i;
413 for (i = 0; i < QDISC_SAVED_NUM(pDev); i++)
414 if (pPriv->ppSaved[i])
415 qdisc_destroy(pPriv->ppSaved[i]);
416 kfree(pPriv->ppSaved);
417 pPriv->ppSaved = NULL;
418 }
419}
420
421static int vboxNetFltClassGraft(struct Qdisc *sch, unsigned long arg, struct Qdisc *pNew,
422 struct Qdisc **ppOld)
423{
424 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
425
426 if (pNew == NULL)
427 pNew = &noop_qdisc;
428
429 sch_tree_lock(sch);
430 *ppOld = pPriv->pChild;
431 pPriv->pChild = pNew;
432#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
433 sch->q.qlen = 0;
434#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
435 qdisc_tree_decrease_qlen(*ppOld, (*ppOld)->q.qlen);
436#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
437 qdisc_reset(*ppOld);
438 sch_tree_unlock(sch);
439
440 return 0;
441}
442
443static struct Qdisc *vboxNetFltClassLeaf(struct Qdisc *sch, unsigned long arg)
444{
445 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
446 return pPriv->pChild;
447}
448
449static unsigned long vboxNetFltClassGet(struct Qdisc *sch, u32 classid)
450{
451 return 1;
452}
453
454static void vboxNetFltClassPut(struct Qdisc *sch, unsigned long arg)
455{
456}
457
458#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
459static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
460 struct rtattr **tca, unsigned long *arg)
461#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
462static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
463 struct nlattr **tca, unsigned long *arg)
464#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
465{
466 return -ENOSYS;
467}
468
469static int vboxNetFltClassDelete(struct Qdisc *sch, unsigned long arg)
470{
471 return -ENOSYS;
472}
473
474static void vboxNetFltClassWalk(struct Qdisc *sch, struct qdisc_walker *walker)
475{
476 if (!walker->stop) {
477 if (walker->count >= walker->skip)
478 if (walker->fn(sch, 1, walker) < 0) {
479 walker->stop = 1;
480 return;
481 }
482 walker->count++;
483 }
484}
485
486static struct tcf_proto **vboxNetFltClassFindTcf(struct Qdisc *sch, unsigned long cl)
487{
488 return NULL;
489}
490
491static int vboxNetFltClassDump(struct Qdisc *sch, unsigned long cl,
492 struct sk_buff *skb, struct tcmsg *tcm)
493{
494 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
495
496 if (cl != 1)
497 return -ENOENT;
498
499 tcm->tcm_handle |= TC_H_MIN(1);
500 tcm->tcm_info = pPriv->pChild->handle;
501
502 return 0;
503}
504
505
506static struct Qdisc_class_ops g_VBoxNetFltClassOps =
507{
508 .graft = vboxNetFltClassGraft,
509 .leaf = vboxNetFltClassLeaf,
510 .get = vboxNetFltClassGet,
511 .put = vboxNetFltClassPut,
512 .change = vboxNetFltClassChange,
513 .delete = vboxNetFltClassDelete,
514 .walk = vboxNetFltClassWalk,
515 .tcf_chain = vboxNetFltClassFindTcf,
516 .dump = vboxNetFltClassDump,
517};
518
519
520static struct Qdisc_ops g_VBoxNetFltQDiscOps = {
521 .cl_ops = &g_VBoxNetFltClassOps,
522 .id = "vboxnetflt",
523 .priv_size = sizeof(struct VBoxNetQDiscPriv),
524 .enqueue = vboxNetFltQdiscEnqueue,
525 .dequeue = vboxNetFltQdiscDequeue,
526#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
527 .requeue = vboxNetFltQdiscRequeue,
528#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
529 .peek = qdisc_peek_dequeued,
530#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
531 .drop = vboxNetFltQdiscDrop,
532 .init = vboxNetFltQdiscInit,
533 .reset = vboxNetFltQdiscReset,
534 .destroy = vboxNetFltQdiscDestroy,
535 .owner = THIS_MODULE
536};
537
538/*
539 * If our qdisc is already attached to the device (that means the user
540 * installed it from the command line with the 'tc' command) we simply update
541 * the pointer to the vboxnetflt instance in the qdisc's private structure.
542 * Otherwise we need to take some additional steps:
543 * - Create our qdisc;
544 * - Save all references to qdiscs;
545 * - Replace our child with the first qdisc reference;
546 * - Replace all references so they point to our qdisc.
547 */
548static void vboxNetFltLinuxQdiscInstall(PVBOXNETFLTINS pThis, struct net_device *pDev)
549{
550#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
551 int i;
552#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
553 PVBOXNETQDISCPRIV pPriv;
554
555 struct Qdisc *pExisting = QDISC_GET(pDev);
556 if (strcmp(pExisting->ops->id, "vboxnetflt"))
557 {
558 /* The existing qdisc is different from ours, let's create a new one. */
559 struct Qdisc *pNew = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
560 &g_VBoxNetFltQDiscOps, TC_H_ROOT);
561 if (!pNew)
562 return; // TODO: Error?
563
564 if (!try_module_get(THIS_MODULE))
565 {
566 /*
567 * This may cause a memory leak but calling qdisc_destroy()
568 * is not an option as it will call module_put().
569 */
570 return;
571 }
572 pPriv = qdisc_priv(pNew);
573
574 qdisc_destroy(pPriv->pChild);
575 pPriv->pChild = QDISC_GET(pDev);
576 atomic_inc(&pPriv->pChild->refcnt);
577 /*
578 * There is no need to deactivate the device or acquire any locks
579 * prior to changing qdiscs since we do not destroy the old qdisc.
580 * Atomic replacement of pointers is enough.
581 */
582 /*
583 * No need to change reference counters here as we merely move
584 * the pointer and the reference counter of the newly allocated
585 * qdisc is already 1.
586 */
587#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
588 pPriv->ppSaved[0] = pDev->qdisc_sleeping;
589 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pNew);
590 ASMAtomicWritePtr(&pDev->qdisc, pNew);
591#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
592 for (i = 0; i < pDev->num_tx_queues; i++)
593 {
594 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
595
596 pPriv->ppSaved[i] = pQueue->qdisc_sleeping;
597 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pNew);
598 ASMAtomicWritePtr(&pQueue->qdisc, pNew);
599 if (i)
600 atomic_inc(&pNew->refcnt);
601 }
602 /* Newer kernels store root qdisc in netdev structure as well. */
603# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
604 pPriv->ppSaved[pDev->num_tx_queues] = pDev->qdisc;
605 ASMAtomicWritePtr(&pDev->qdisc, pNew);
606 atomic_inc(&pNew->refcnt);
607# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
608#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
609 }
610 else
611 {
612 /* We already have vboxnetflt qdisc, let's use it. */
613 pPriv = qdisc_priv(pExisting);
614 }
615 ASMAtomicWritePtr(&pPriv->pVBoxNetFlt, pThis);
616 QDISC_LOG(("vboxNetFltLinuxInstallQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
617}
618
619static void vboxNetFltLinuxQdiscRemove(PVBOXNETFLTINS pThis, struct net_device *pDev)
620{
621#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
622 int i;
623#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
624 PVBOXNETQDISCPRIV pPriv;
625 struct Qdisc *pQdisc;
626 if (!pDev)
627 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
628 if (!VALID_PTR(pDev))
629 {
630 printk("VBoxNetFlt: Failed to detach qdisc, invalid device pointer: %p\n",
631 pDev);
632 return; // TODO: Consider returning an error
633 }
634
635
636 pQdisc = QDISC_GET(pDev);
637 if (strcmp(pQdisc->ops->id, "vboxnetflt"))
638 {
639 /* Looks like the user has replaced our qdisc manually. */
640 printk("VBoxNetFlt: Failed to detach qdisc, wrong qdisc: %s\n",
641 pQdisc->ops->id);
642 return; // TODO: Consider returning an error
643 }
644
645 pPriv = qdisc_priv(pQdisc);
646 Assert(pPriv->pVBoxNetFlt == pThis);
647 ASMAtomicWriteNullPtr(&pPriv->pVBoxNetFlt);
648
649 QDISC_LOG(("vboxNetFltLinuxQdiscRemove: refcnt=%d num_tx_queues=%d\n",
650 atomic_read(&pQdisc->refcnt), pDev->num_tx_queues));
651#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
652 /* Play it safe, make sure the qdisc is not being used. */
653 if (pPriv->ppSaved[0])
654 {
655 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pPriv->ppSaved[0]);
656 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[0]);
657 pPriv->ppSaved[0] = NULL;
658 while (QDISC_IS_BUSY(pDev, pQdisc))
659 yield();
660 qdisc_destroy(pQdisc); /* Destroy reference */
661 }
662#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
663 for (i = 0; i < pDev->num_tx_queues; i++)
664 {
665 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
666 if (pPriv->ppSaved[i])
667 {
668 Assert(pQueue->qdisc_sleeping == pQdisc);
669 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pPriv->ppSaved[i]);
670 ASMAtomicWritePtr(&pQueue->qdisc, pPriv->ppSaved[i]);
671 pPriv->ppSaved[i] = NULL;
672 while (QDISC_IS_BUSY(pDev, pQdisc))
673 yield();
674 qdisc_destroy(pQdisc); /* Destroy reference */
675 }
676 }
677 /* Newer kernels store root qdisc in netdev structure as well. */
678#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
679 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[pDev->num_tx_queues]);
680 pPriv->ppSaved[pDev->num_tx_queues] = NULL;
681 while (QDISC_IS_BUSY(pDev, pQdisc))
682 yield();
683 qdisc_destroy(pQdisc); /* Destroy reference */
684#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
685#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
686
687 /*
688 * At this point all references to our qdisc should be gone
689 * unless the user had installed it manually.
690 */
691 QDISC_LOG(("vboxNetFltLinuxRemoveQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
692}
693
694#endif /* VBOXNETFLT_WITH_QDISC */
695
696
697/**
698 * Initialize module.
699 *
700 * @returns appropriate status code.
701 */
702static int __init VBoxNetFltLinuxInit(void)
703{
704 int rc;
705 /*
706 * Initialize IPRT.
707 */
708 rc = RTR0Init(0);
709 if (RT_SUCCESS(rc))
710 {
711 Log(("VBoxNetFltLinuxInit\n"));
712
713 /*
714 * Initialize the globals and connect to the support driver.
715 *
716 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
717 * for establishing the connection to the support driver.
718 */
719 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
720 rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
721 if (RT_SUCCESS(rc))
722 {
723#ifdef VBOXNETFLT_WITH_QDISC
724 /*memcpy(&g_VBoxNetFltQDiscOps, &pfifo_qdisc_ops, sizeof(g_VBoxNetFltQDiscOps));
725 strcpy(g_VBoxNetFltQDiscOps.id, "vboxnetflt");
726 g_VBoxNetFltQDiscOps.owner = THIS_MODULE;*/
727 rc = register_qdisc(&g_VBoxNetFltQDiscOps);
728 if (rc)
729 {
730 LogRel(("VBoxNetFlt: Failed to registed qdisc: %d\n", rc));
731 return rc;
732 }
733#endif /* VBOXNETFLT_WITH_QDISC */
734 LogRel(("VBoxNetFlt: Successfully started.\n"));
735 return 0;
736 }
737
738 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
739 RTR0Term();
740 }
741 else
742 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
743
744 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
745 return -RTErrConvertToErrno(rc);
746}
747
748
749/**
750 * Unload the module.
751 *
752 * @todo We have to prevent this if we're busy!
753 */
754static void __exit VBoxNetFltLinuxUnload(void)
755{
756 int rc;
757 Log(("VBoxNetFltLinuxUnload\n"));
758 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
759
760#ifdef VBOXNETFLT_WITH_QDISC
761 unregister_qdisc(&g_VBoxNetFltQDiscOps);
762#endif /* VBOXNETFLT_WITH_QDISC */
763 /*
764 * Undo the work done during start (in reverse order).
765 */
766 rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
767 AssertRC(rc); NOREF(rc);
768
769 RTR0Term();
770
771 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
772
773 Log(("VBoxNetFltLinuxUnload - done\n"));
774}
775
776
777/**
778 * Experiment where we filter traffic from the host to the internal network
779 * before it reaches the NIC driver.
780 *
781 * The current code uses a very ugly hack and only works on kernels using the
782 * net_device_ops (>= 2.6.29). It has been shown to give us a
783 * performance boost of 60-100% though. So, we have to find some less hacky way
784 * of getting this job done eventually.
785 *
786 * #define VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
787 */
788#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
789
790/**
791 * The overridden net_device_ops of the device we're attached to.
792 *
793 * Requires Linux 2.6.29 or later.
794 *
795 * This is a very dirty hack that was created to explore how much we can improve
796 * the host to guest transfers by not CC'ing the NIC.
797 */
798typedef struct VBoxNetDeviceOpsOverride
799{
800 /** Our overridden ops. */
801 struct net_device_ops Ops;
802 /** Magic word. */
803 uint32_t u32Magic;
804 /** Pointer to the original ops. */
805 struct net_device_ops const *pOrgOps;
806 /** Pointer to the net filter instance. */
807 PVBOXNETFLTINS pVBoxNetFlt;
808 /** The number of filtered packets. */
809 uint64_t cFiltered;
810 /** The total number of packets. */
811 uint64_t cTotal;
812} VBOXNETDEVICEOPSOVERRIDE, *PVBOXNETDEVICEOPSOVERRIDE;
813/** VBOXNETDEVICEOPSOVERRIDE::u32Magic value. */
814#define VBOXNETDEVICEOPSOVERRIDE_MAGIC UINT32_C(0x00c0ffee)
815
816/**
817 * ndo_start_xmit wrapper that drops packets that shouldn't go to the wire
818 * because they belong on the internal network.
819 *
820 * @returns NETDEV_TX_XXX.
821 * @param pSkb The socket buffer to transmit.
822 * @param pDev The net device.
823 */
824static int vboxNetFltLinuxStartXmitFilter(struct sk_buff *pSkb, struct net_device *pDev)
825{
826 PVBOXNETDEVICEOPSOVERRIDE pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->netdev_ops;
827 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
828 PCRTNETETHERHDR pEtherHdr;
829 PINTNETTRUNKSWPORT pSwitchPort;
830 uint32_t cbHdrs;
831
832
833 /*
834 * Validate the override structure.
835 *
836 * Note! We're racing vboxNetFltLinuxUnhookDev here. If this was supposed
837 * to be production quality code, we would have to be much more
838 * careful here and avoid the race.
839 */
840 if ( !VALID_PTR(pOverride)
841 || pOverride->u32Magic != VBOXNETDEVICEOPSOVERRIDE_MAGIC
842 || !VALID_PTR(pOverride->pOrgOps))
843 {
844 printk("vboxNetFltLinuxStartXmitFilter: bad override %p\n", pOverride);
845 dev_kfree_skb(pSkb);
846 return NETDEV_TX_OK;
847 }
848 pOverride->cTotal++;
849
850 /*
851 * Do the filtering based on the default OUI of our virtual NICs.
852 *
853 * Note! In a real solution, we would ask the switch whether the
854 * destination MAC is 100% certain to be on the internal network and then
855 * drop it.
856 */
857 cbHdrs = skb_headlen(pSkb);
858 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
859 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
860 if ( pEtherHdr
861 && VALID_PTR(pOverride->pVBoxNetFlt)
862 && (pSwitchPort = pOverride->pVBoxNetFlt->pSwitchPort) != NULL
863 && VALID_PTR(pSwitchPort)
864 && cbHdrs >= 6)
865 {
866 INTNETSWDECISION enmDecision;
867
868 /** @todo consider reference counting, etc. */
869 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
870 if (enmDecision == INTNETSWDECISION_INTNET)
871 {
872 dev_kfree_skb(pSkb);
873 pOverride->cFiltered++;
874 return NETDEV_TX_OK;
875 }
876 }
877
878 return pOverride->pOrgOps->ndo_start_xmit(pSkb, pDev);
879}
880
881/**
882 * Hooks the ndo_start_xmit operation of the device.
883 *
884 * @param pThis The net filter instance.
885 * @param pDev The net device.
886 */
887static void vboxNetFltLinuxHookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
888{
889 PVBOXNETDEVICEOPSOVERRIDE pOverride;
890 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
891
892 pOverride = RTMemAlloc(sizeof(*pOverride));
893 if (!pOverride)
894 return;
895 pOverride->pOrgOps = pDev->netdev_ops;
896 pOverride->Ops = *pDev->netdev_ops;
897 pOverride->Ops.ndo_start_xmit = vboxNetFltLinuxStartXmitFilter;
898 pOverride->u32Magic = VBOXNETDEVICEOPSOVERRIDE_MAGIC;
899 pOverride->cTotal = 0;
900 pOverride->cFiltered = 0;
901 pOverride->pVBoxNetFlt = pThis;
902
903 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp); /* (this isn't necessary, but so what) */
904 ASMAtomicWritePtr((void * volatile *)&pDev->netdev_ops, pOverride);
905 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
906}
907
908/**
909 * Undoes what vboxNetFltLinuxHookDev did.
910 *
911 * @param pThis The net filter instance.
912 * @param pDev The net device. Can be NULL, in which case
913 * we'll try retrieve it from @a pThis.
914 */
915static void vboxNetFltLinuxUnhookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
916{
917 PVBOXNETDEVICEOPSOVERRIDE pOverride;
918 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
919
920 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
921 if (!pDev)
922 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
923 if (VALID_PTR(pDev))
924 {
925 pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->netdev_ops;
926 if ( VALID_PTR(pOverride)
927 && pOverride->u32Magic == VBOXNETDEVICEOPSOVERRIDE_MAGIC
928 && VALID_PTR(pOverride->pOrgOps)
929 )
930 {
931 ASMAtomicWritePtr((void * volatile *)&pDev->netdev_ops, pOverride->pOrgOps);
932 ASMAtomicWriteU32(&pOverride->u32Magic, 0);
933 }
934 else
935 pOverride = NULL;
936 }
937 else
938 pOverride = NULL;
939 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
940
941 if (pOverride)
942 {
943 printk("vboxnetflt: dropped %llu out of %llu packets\n", pOverride->cFiltered, pOverride->cTotal);
944 RTMemFree(pOverride);
945 }
946}
947
948#endif /* VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT */
949
950
951/**
952 * Reads and retains the host interface handle.
953 *
954 * @returns The handle, NULL if detached.
955 * @param pThis The net filter instance.
956 */
957DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
958{
959#if 0
960 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
961 struct net_device *pDev = NULL;
962
963 Log(("vboxNetFltLinuxRetainNetDev\n"));
964 /*
965 * Be careful here to avoid problems racing the detached callback.
966 */
967 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
968 if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
969 {
970 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
971 if (pDev)
972 {
973 dev_hold(pDev);
974 Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
975 }
976 }
977 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
978
979 Log(("vboxNetFltLinuxRetainNetDev - done\n"));
980 return pDev;
981#else
982 return ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
983#endif
984}
985
986
987/**
988 * Release the host interface handle previously retained
989 * by vboxNetFltLinuxRetainNetDev.
990 *
991 * @param pThis The instance.
992 * @param pDev The vboxNetFltLinuxRetainNetDev
993 * return value, NULL is fine.
994 */
995DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
996{
997#if 0
998 Log(("vboxNetFltLinuxReleaseNetDev\n"));
999 NOREF(pThis);
1000 if (pDev)
1001 {
1002 dev_put(pDev);
1003 Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1004 }
1005 Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
1006#endif
1007}
1008
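/* Buffers injected by vboxNetFltLinuxSkBufFromSG are tagged by storing a magic value
 * derived from the interface index in the last four bytes of skb->cb, allowing the packet
 * handler to recognise and drop its own frames (see vboxNetFltLinuxSkBufIsOur). */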
1009#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
1010#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))
1011
1012/**
1013 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
1014 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
1015 *
1016 * @returns true / false accordingly.
1017 * @param pBuf The sk_buff.
1018 */
1019DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
1020{
1021 return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
1022}
1023
1024
1025/**
1026 * Internal worker that creates a Linux sk_buff for a
1027 * (scatter/)gather list.
1028 *
1029 * @returns Pointer to the sk_buff.
1030 * @param pThis The instance.
1031 * @param pSG The (scatter/)gather list.
1032 * @param fDstWire Set if the destination is the wire.
1033 */
1034static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
1035{
1036 struct sk_buff *pPkt;
1037 struct net_device *pDev;
1038 unsigned fGsoType = 0;
1039
1040 if (pSG->cbTotal == 0)
1041 {
1042 LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
1043 return NULL;
1044 }
1045
1046 /** @todo We should use fragments mapping the SG buffers with large packets.
1047 * 256 bytes seems to be a threshold used a lot for this. It
1048 * requires some nasty work on the intnet side though... */
1049 /*
1050 * Allocate a packet and copy over the data.
1051 */
1052 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1053 pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
1054 if (RT_UNLIKELY(!pPkt))
1055 {
1056 Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
1057 pSG->pvUserData = NULL;
1058 return NULL;
1059 }
1060 pPkt->dev = pDev;
1061 pPkt->ip_summed = CHECKSUM_NONE;
1062
1063 /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
1064 skb_reserve(pPkt, NET_IP_ALIGN);
1065
1066 /* Copy the segments. */
1067 skb_put(pPkt, pSG->cbTotal);
1068 IntNetSgRead(pSG, pPkt->data);
1069
1070#if defined(VBOXNETFLT_WITH_GSO_XMIT_WIRE) || defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
1071 /*
1072 * Setup GSO if used by this packet.
1073 */
1074 switch ((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type)
1075 {
1076 default:
1077 AssertMsgFailed(("%u (%s)\n", pSG->GsoCtx.u8Type, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type) ));
1078 /* fall thru */
1079 case PDMNETWORKGSOTYPE_INVALID:
1080 fGsoType = 0;
1081 break;
1082 case PDMNETWORKGSOTYPE_IPV4_TCP:
1083 fGsoType = SKB_GSO_TCPV4;
1084 break;
1085 case PDMNETWORKGSOTYPE_IPV4_UDP:
1086 fGsoType = SKB_GSO_UDP;
1087 break;
1088 case PDMNETWORKGSOTYPE_IPV6_TCP:
1089 fGsoType = SKB_GSO_TCPV6;
1090 break;
1091 }
1092 if (fGsoType)
1093 {
1094 struct skb_shared_info *pShInfo = skb_shinfo(pPkt);
1095
1096 pShInfo->gso_type = fGsoType | SKB_GSO_DODGY;
1097 pShInfo->gso_size = pSG->GsoCtx.cbMaxSeg;
1098 pShInfo->gso_segs = PDMNetGsoCalcSegmentCount(&pSG->GsoCtx, pSG->cbTotal);
1099
1100 /*
1101 * We need to set checksum fields even if the packet goes to the host
1102 * directly as it may be immediately forwarded by the IP layer @bugref{5020}.
1103 */
1104 Assert(skb_headlen(pPkt) >= pSG->GsoCtx.cbHdrs);
1105 pPkt->ip_summed = CHECKSUM_PARTIAL;
1106# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1107 pPkt->csum_start = skb_headroom(pPkt) + pSG->GsoCtx.offHdr2;
1108 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1109 pPkt->csum_offset = RT_OFFSETOF(RTNETTCP, th_sum);
1110 else
1111 pPkt->csum_offset = RT_OFFSETOF(RTNETUDP, uh_sum);
1112# else
1113 pPkt->h.raw = pPkt->data + pSG->GsoCtx.offHdr2;
1114 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1115 pPkt->csum = RT_OFFSETOF(RTNETTCP, th_sum);
1116 else
1117 pPkt->csum = RT_OFFSETOF(RTNETUDP, uh_sum);
1118# endif
1119 if (!fDstWire)
1120 PDMNetGsoPrepForDirectUse(&pSG->GsoCtx, pPkt->data, pSG->cbTotal, false /*fPayloadChecksum*/);
1121 }
1122#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE || VBOXNETFLT_WITH_GSO_XMIT_HOST */
1123
1124 /*
1125 * Finish up the socket buffer.
1126 */
1127 pPkt->protocol = eth_type_trans(pPkt, pDev);
1128 if (fDstWire)
1129 {
1130 VBOX_SKB_RESET_NETWORK_HDR(pPkt);
1131
1132 /* Restore ethernet header back. */
1133 skb_push(pPkt, ETH_HLEN); /** @todo VLAN: +4 if VLAN? */
1134 VBOX_SKB_RESET_MAC_HDR(pPkt);
1135 }
1136 VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
1137
1138 return pPkt;
1139}
1140
1141
1142/**
1143 * Initializes a SG list from an sk_buff.
1144 *
1145 * @returns Number of segments.
1146 * @param pThis The instance.
1147 * @param pBuf The sk_buff.
1148 * @param pSG The SG.
1150 * @param cSegs The number of segments allocated for the SG.
1151 * This should match the number in the sk_buff exactly!
1152 * @param fSrc The source of the frame.
1153 * @param pGsoCtx Pointer to the GSO context if it's a GSO
1154 * internal network frame. NULL if regular frame.
1155 */
1156DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG,
1157 unsigned cSegs, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1158{
1159 int i;
1160 NOREF(pThis);
1161
1162 Assert(!skb_shinfo(pBuf)->frag_list);
1163
1164 if (!pGsoCtx)
1165 IntNetSgInitTempSegs(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/);
1166 else
1167 IntNetSgInitTempSegsGso(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/, pGsoCtx);
1168
1169#ifdef VBOXNETFLT_SG_SUPPORT
1170 pSG->aSegs[0].cb = skb_headlen(pBuf);
1171 pSG->aSegs[0].pv = pBuf->data;
1172 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1173
1174 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
1175 {
1176 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
1177 pSG->aSegs[i+1].cb = pFrag->size;
1178 pSG->aSegs[i+1].pv = kmap(pFrag->page);
1179 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
1180 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
1181 }
1182 ++i;
1183
1184#else
1185 pSG->aSegs[0].cb = pBuf->len;
1186 pSG->aSegs[0].pv = pBuf->data;
1187 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1188 i = 1;
1189#endif
1190
1191 pSG->cSegsUsed = i;
1192
1193#ifdef PADD_RUNT_FRAMES_FROM_HOST
1194 /*
1195 * Add a trailer if the frame is too small.
1196 *
1197 * Since we're getting to the packet before it is framed, it has not
1198 * yet been padded. The current solution is to add a segment pointing
1199 * to a buffer containing all zeros and pray that works for all frames...
1200 */
1201 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
1202 {
1203 static uint8_t const s_abZero[128] = {0};
1204
1205 AssertReturnVoid(i < cSegs);
1206
1207 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
1208 pSG->aSegs[i].pv = (void *)&s_abZero[0];
1209 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
1210 pSG->cbTotal = 60;
1211 pSG->cSegsUsed++;
1212 Assert(i + 1 <= pSG->cSegsAlloc);
1213 }
1214#endif
1215
1216 Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
1217 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
1218 for (i = 0; i < pSG->cSegsUsed; i++)
1219 Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
1220 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
1221}
1222
1223/**
1224 * Packet handler; called by the kernel for each packet delivered to our packet type.
1225 *
1226 * @returns 0 (the return value is ignored by the kernel).
1227 * @param pBuf The socket buffer.
1228 * @param pSkbDev The device the packet arrived on.
1229 * @param pPacketType The packet type structure this handler was registered with.
1230 * @param pOrigDev The original receiving device (2.6.14 and later only).
1232 */
1233#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
1234static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1235 struct net_device *pSkbDev,
1236 struct packet_type *pPacketType,
1237 struct net_device *pOrigDev)
1238#else
1239static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1240 struct net_device *pSkbDev,
1241 struct packet_type *pPacketType)
1242#endif
1243{
1244 PVBOXNETFLTINS pThis;
1245 struct net_device *pDev;
1246 LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
1247 pBuf, pSkbDev, pPacketType));
1248#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1249 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1250 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1251 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1252#else
1253 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1254 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1255#endif
1256 /*
1257 * Drop it immediately?
1258 */
1259 if (!pBuf)
1260 return 0;
1261
1262 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
1263 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1264 if (pThis->u.s.pDev != pSkbDev)
1265 {
1266 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
1267 return 0;
1268 }
1269
1270 Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
1271 if (vboxNetFltLinuxSkBufIsOur(pBuf))
1272 {
1273 Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
1274 dev_kfree_skb(pBuf);
1275 return 0;
1276 }
1277
1278#ifndef VBOXNETFLT_SG_SUPPORT
1279 {
1280 /*
1281 * Get rid of fragmented packets; they cause too much trouble.
1282 */
1283 struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
1284 kfree_skb(pBuf);
1285 if (!pCopy)
1286 {
1287 LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
1288 return 0;
1289 }
1290 pBuf = pCopy;
1291# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1292 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1293 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1294 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1295# else
1296 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1297 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1298# endif
1299 }
1300#endif
1301
1302#ifdef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1303 /* Forward it to the internal network. */
1304 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1305#else
1306 /* Add the packet to transmit queue and schedule the bottom half. */
1307 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
1308 schedule_work(&pThis->u.s.XmitTask);
1309 Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
1310 &pThis->u.s.XmitTask, pBuf));
1311#endif
1312
1313 /* It does not really matter what we return, it is ignored by the kernel. */
1314 return 0;
1315}
1316
1317/**
1318 * Calculate the number of INTNETSEG segments the socket buffer will need.
1319 *
1320 * @returns Segment count.
1321 * @param pBuf The socket buffer.
1322 */
1323DECLINLINE(unsigned) vboxNetFltLinuxCalcSGSegments(struct sk_buff *pBuf)
1324{
1325#ifdef VBOXNETFLT_SG_SUPPORT
1326 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
1327#else
1328 unsigned cSegs = 1;
1329#endif
1330#ifdef PADD_RUNT_FRAMES_FROM_HOST
1331 /* vboxNetFltLinuxSkBufToSG adds a padding segment if it's a runt. */
1332 if (pBuf->len < 60)
1333 cSegs++;
1334#endif
1335 return cSegs;
1336}
1337
1338/**
1339 * Destroy the intnet scatter / gather buffer created by
1340 * vboxNetFltLinuxSkBufToSG.
1341 */
1342static void vboxNetFltLinuxDestroySG(PINTNETSG pSG)
1343{
1344#ifdef VBOXNETFLT_SG_SUPPORT
1345 int i;
1346
1347 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
1348 {
1349 printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
1350 kunmap(pSG->aSegs[i+1].pv);
1351 }
1352#endif
1353 NOREF(pSG);
1354}
1355
1356#ifdef LOG_ENABLED
1357/**
1358 * Logging helper.
1359 */
1360static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
1361{
1362 uint8_t *pInt, *pExt;
1363 static int iPacketNo = 1;
1364 iPacketNo += iIncrement;
1365 if (fEgress)
1366 {
1367 pExt = pSG->aSegs[0].pv;
1368 pInt = pExt + 6;
1369 }
1370 else
1371 {
1372 pInt = pSG->aSegs[0].pv;
1373 pExt = pInt + 6;
1374 }
1375 Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
1376 " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
1377 pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
1378 fEgress ? "-->" : "<--", pszWhere,
1379 pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
1380 pSG->cbTotal, iPacketNo));
1381 Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
1382}
1383#else
1384# define vboxNetFltDumpPacket(a, b, c, d) do {} while (0)
1385#endif
1386
1387#ifdef VBOXNETFLT_WITH_GSO_RECV
1388
1389/**
1390 * Worker for vboxNetFltLinuxForwardToIntNet that checks if we can forward a
1391 * GSO socket buffer without having to segment it.
1392 *
1393 * @returns true on success, false if needs segmenting.
1394 * @param pThis The net filter instance.
1395 * @param pSkb The GSO socket buffer.
1396 * @param fSrc The source.
1397 * @param pGsoCtx Where to return the GSO context on success.
1398 */
1399static bool vboxNetFltLinuxCanForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc,
1400 PPDMNETWORKGSO pGsoCtx)
1401{
1402 PDMNETWORKGSOTYPE enmGsoType;
1403 uint16_t uEtherType;
1404 unsigned int cbTransport;
1405 unsigned int offTransport;
1406 unsigned int cbTransportHdr;
1407 unsigned uProtocol;
1408 union
1409 {
1410 RTNETIPV4 IPv4;
1411 RTNETIPV6 IPv6;
1412 RTNETTCP Tcp;
1413 uint8_t ab[40];
1414 uint16_t au16[40/2];
1415 uint32_t au32[40/4];
1416 } Buf;
1417
1418 /*
1419 * Check the GSO properties of the socket buffer and make sure it fits.
1420 */
1421 /** @todo Figure out how to handle SKB_GSO_TCP_ECN! */
1422 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCPV6 | SKB_GSO_TCPV4) ))
1423 {
1424 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_type=%#x\n", skb_shinfo(pSkb)->gso_type));
1425 return false;
1426 }
1427 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_size < 1
1428 || pSkb->len > VBOX_MAX_GSO_SIZE ))
1429 {
1430 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_size=%#x skb_len=%#x (max=%#x)\n", skb_shinfo(pSkb)->gso_size, pSkb->len, VBOX_MAX_GSO_SIZE));
1431 return false;
1432 }
1433 /*
1434 * It is possible to receive GSO packets from the wire if GRO is enabled.
1435 */
1436 if (RT_UNLIKELY(fSrc & INTNETTRUNKDIR_WIRE))
1437 {
1438 Log5(("vboxNetFltLinuxCanForwardAsGso: fSrc=wire\n"));
1439#ifdef VBOXNETFLT_WITH_GRO
1440 /*
1441 * The packet came from the wire and the driver has already consumed
1442 * the MAC header. We need to restore it.
1443 */
1444 pSkb->mac_len = pSkb->network_header - pSkb->mac_header;
1445 skb_push(pSkb, pSkb->mac_len);
1446 Log5(("vboxNetFltLinuxCanForwardAsGso: mac_len=%d data=%p mac_header=%p network_header=%p\n",
1447 pSkb->mac_len, pSkb->data, skb_mac_header(pSkb), skb_network_header(pSkb)));
1448#else /* !VBOXNETFLT_WITH_GRO */
1449 /* Older kernels didn't have GRO. */
1450 return false;
1451#endif /* !VBOXNETFLT_WITH_GRO */
1452 }
1453 else
1454 {
1455 /*
1456 * skb_gso_segment does the following. Do we need to do it as well?
1457 */
1458#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1459 skb_reset_mac_header(pSkb);
1460 pSkb->mac_len = pSkb->network_header - pSkb->mac_header;
1461#else
1462 pSkb->mac.raw = pSkb->data;
1463 pSkb->mac_len = pSkb->nh.raw - pSkb->data;
1464#endif
1465 }
1466
1467 /*
1468 * Switch on the ethertype.
1469 */
1470 uEtherType = pSkb->protocol;
1471 if ( uEtherType == RT_H2N_U16_C(RTNET_ETHERTYPE_VLAN)
1472 && pSkb->mac_len == sizeof(RTNETETHERHDR) + sizeof(uint32_t))
1473 {
1474 uint16_t const *puEtherType = skb_header_pointer(pSkb, sizeof(RTNETETHERHDR) + sizeof(uint16_t), sizeof(uint16_t), &Buf);
1475 if (puEtherType)
1476 uEtherType = *puEtherType;
1477 }
1478 switch (uEtherType)
1479 {
1480 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV4):
1481 {
1482 unsigned int cbHdr;
1483 PCRTNETIPV4 pIPv4 = (PCRTNETIPV4)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv4), &Buf);
1484 if (RT_UNLIKELY(!pIPv4))
1485 {
1486 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv4 hdr\n"));
1487 return false;
1488 }
1489
1490 cbHdr = pIPv4->ip_hl * 4;
1491 cbTransport = RT_N2H_U16(pIPv4->ip_len);
1492 if (RT_UNLIKELY( cbHdr < RTNETIPV4_MIN_LEN
1493 || cbHdr > cbTransport ))
1494 {
1495 Log5(("vboxNetFltLinuxCanForwardAsGso: invalid IPv4 lengths: ip_hl=%u ip_len=%u\n", pIPv4->ip_hl, RT_N2H_U16(pIPv4->ip_len)));
1496 return false;
1497 }
1498 cbTransport -= cbHdr;
1499 offTransport = pSkb->mac_len + cbHdr;
1500 uProtocol = pIPv4->ip_p;
1501 if (uProtocol == RTNETIPV4_PROT_TCP)
1502 enmGsoType = PDMNETWORKGSOTYPE_IPV4_TCP;
1503 else if (uProtocol == RTNETIPV4_PROT_UDP)
1504 enmGsoType = PDMNETWORKGSOTYPE_IPV4_UDP;
1505 else /** @todo IPv6: 4to6 tunneling */
1506 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1507 break;
1508 }
1509
1510 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV6):
1511 {
1512 PCRTNETIPV6 pIPv6 = (PCRTNETIPV6)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv6), &Buf);
1513 if (RT_UNLIKELY(!pIPv6))
1514 {
1515 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv6 hdr\n"));
1516 return false;
1517 }
1518
1519 cbTransport = RT_N2H_U16(pIPv6->ip6_plen);
1520 offTransport = pSkb->mac_len + sizeof(RTNETIPV6);
1521 uProtocol = pIPv6->ip6_nxt;
1522 /** @todo IPv6: Dig our way out of the other headers. */
1523 if (uProtocol == RTNETIPV4_PROT_TCP)
1524 enmGsoType = PDMNETWORKGSOTYPE_IPV6_TCP;
1525 else if (uProtocol == RTNETIPV4_PROT_UDP)
1526 enmGsoType = PDMNETWORKGSOTYPE_IPV4_UDP;
1527 else
1528 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1529 break;
1530 }
1531
1532 default:
1533 Log5(("vboxNetFltLinuxCanForwardAsGso: uEtherType=%#x\n", RT_H2N_U16(uEtherType)));
1534 return false;
1535 }
1536
1537 if (enmGsoType == PDMNETWORKGSOTYPE_INVALID)
1538 {
1539 Log5(("vboxNetFltLinuxCanForwardAsGso: Unsupported protocol %d\n", uProtocol));
1540 return false;
1541 }
1542
1543 if (RT_UNLIKELY( offTransport + cbTransport <= offTransport
1544 || offTransport + cbTransport > pSkb->len
1545 || cbTransport < (uProtocol == RTNETIPV4_PROT_TCP ? RTNETTCP_MIN_LEN : RTNETUDP_MIN_LEN)) )
1546 {
1547 Log5(("vboxNetFltLinuxCanForwardAsGso: Bad transport length; off=%#x + cb=%#x => %#x; skb_len=%#x (%s)\n",
1548 offTransport, cbTransport, offTransport + cbTransport, pSkb->len, PDMNetGsoTypeName(enmGsoType) ));
1549 return false;
1550 }
1551
1552 /*
1553 * Check the TCP/UDP bits.
1554 */
1555 if (uProtocol == RTNETIPV4_PROT_TCP)
1556 {
1557 PCRTNETTCP pTcp = (PCRTNETTCP)skb_header_pointer(pSkb, offTransport, sizeof(Buf.Tcp), &Buf);
1558 if (RT_UNLIKELY(!pTcp))
1559 {
1560 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access TCP hdr\n"));
1561 return false;
1562 }
1563
1564 cbTransportHdr = pTcp->th_off * 4;
1565 if (RT_UNLIKELY( cbTransportHdr < RTNETTCP_MIN_LEN
1566 || cbTransportHdr > cbTransport
1567 || offTransport + cbTransportHdr >= UINT8_MAX
1568 || offTransport + cbTransportHdr >= pSkb->len ))
1569 {
1570 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for TCP header; off=%#x cb=%#x skb_len=%#x\n", offTransport, cbTransportHdr, pSkb->len));
1571 return false;
1572 }
1573
1574 }
1575 else
1576 {
1577 Assert(uProtocol == RTNETIPV4_PROT_UDP);
1578 cbTransportHdr = sizeof(RTNETUDP);
1579 if (RT_UNLIKELY( offTransport + cbTransportHdr >= UINT8_MAX
1580 || offTransport + cbTransportHdr >= pSkb->len ))
1581 {
1582 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for UDP header; off=%#x skb_len=%#x\n", offTransport, pSkb->len));
1583 return false;
1584 }
1585 }
1586
1587 /*
1588 * We're good, init the GSO context.
1589 */
1590 pGsoCtx->u8Type = enmGsoType;
1591 pGsoCtx->cbHdrs = offTransport + cbTransportHdr;
1592 pGsoCtx->cbMaxSeg = skb_shinfo(pSkb)->gso_size;
1593 pGsoCtx->offHdr1 = pSkb->mac_len;
1594 pGsoCtx->offHdr2 = offTransport;
1595 pGsoCtx->au8Unused[0] = 0;
1596 pGsoCtx->au8Unused[1] = 0;
1597
1598 return true;
1599}
1600
1601/**
1602 * Forward the socket buffer as a GSO internal network frame.
1603 *
1604 * @returns IPRT status code.
1605 * @param pThis The net filter instance.
1606 * @param pSkb The GSO socket buffer.
1607 * @param fSrc The source.
1608 * @param pGsoCtx The GSO context to use.
1609 */
1610static int vboxNetFltLinuxForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1611{
1612 int rc;
1613 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pSkb);
1614 if (RT_LIKELY(cSegs <= MAX_SKB_FRAGS + 1))
1615 {
1616 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1617 if (RT_LIKELY(pSG))
1618 {
1619 vboxNetFltLinuxSkBufToSG(pThis, pSkb, pSG, cSegs, fSrc, pGsoCtx);
1620
1621 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1622 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1623
1624 vboxNetFltLinuxDestroySG(pSG);
1625 rc = VINF_SUCCESS;
1626 }
1627 else
1628 {
1629 Log(("VBoxNetFlt: Dropping the sk_buff (failure case).\n"));
1630 rc = VERR_NO_MEMORY;
1631 }
1632 }
1633 else
1634 {
1635 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1636 rc = VERR_INTERNAL_ERROR_3;
1637 }
1638
1639 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1640 dev_kfree_skb(pSkb);
1641 return rc;
1642}
1643
1644#endif /* VBOXNETFLT_WITH_GSO_RECV */
1645
1646/**
1647 * Worker for vboxNetFltLinuxForwardToIntNet.
1648 *
1649 * @returns VINF_SUCCESS or VERR_NO_MEMORY.
1650 * @param pThis The net filter instance.
1651 * @param pBuf The socket buffer.
1652 * @param fSrc The source.
1653 */
1654static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
1655{
1656 int rc;
1657 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pBuf);
1658 if (cSegs <= MAX_SKB_FRAGS + 1)
1659 {
1660 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1661 if (RT_LIKELY(pSG))
1662 {
1663 if (fSrc & INTNETTRUNKDIR_WIRE)
1664 {
1665 /*
1666 * The packet came from the wire; the Ethernet header was removed by the device driver.
1667 * Restore it.
1668 */
1669 skb_push(pBuf, ETH_HLEN);
1670 }
1671
1672 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc, NULL /*pGsoCtx*/);
1673
1674 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1675 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1676
1677 vboxNetFltLinuxDestroySG(pSG);
1678 rc = VINF_SUCCESS;
1679 }
1680 else
1681 {
1682 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
1683 rc = VERR_NO_MEMORY;
1684 }
1685 }
1686 else
1687 {
1688 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1689 rc = VERR_INTERNAL_ERROR_3;
1690 }
1691
1692 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1693 dev_kfree_skb(pBuf);
1694 return rc;
1695}
1696
1697/**
1698 * Forwards the socket buffer to the internal network, segmenting it first if necessary.
1699 * @param pBuf The socket buffer. This is consumed by this function.
1700 */
1701static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
1702{
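/* PACKET_OUTGOING marks frames originating from the host stack; everything else is treated as coming in from the wire. */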
1703 uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;
1704
1705#ifdef VBOXNETFLT_WITH_GSO
1706 if (skb_is_gso(pBuf))
1707 {
1708 PDMNETWORKGSO GsoCtx;
1709 Log3(("vboxNetFltLinuxForwardToIntNet: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x ip_summed=%d\n",
1710 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, pBuf->ip_summed));
1711# ifdef VBOXNETFLT_WITH_GSO_RECV
1712 if ( (skb_shinfo(pBuf)->gso_type & (SKB_GSO_UDP | SKB_GSO_TCPV6 | SKB_GSO_TCPV4))
1713 && vboxNetFltLinuxCanForwardAsGso(pThis, pBuf, fSrc, &GsoCtx) )
1714 vboxNetFltLinuxForwardAsGso(pThis, pBuf, fSrc, &GsoCtx);
1715 else
1716# endif
1717 {
1718 /* Need to segment the packet */
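            /* skb_gso_segment() builds a ->next linked chain of per-MSS segments
               from the GSO skb; each segment is forwarded on its own below and
               the original skb is freed afterwards. */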
1719 struct sk_buff *pNext;
1720 struct sk_buff *pSegment = skb_gso_segment(pBuf, 0 /*supported features*/);
1721 if (IS_ERR(pSegment))
1722 {
1723 dev_kfree_skb(pBuf);
1724 LogRel(("VBoxNetFlt: Failed to segment a packet (%d).\n", PTR_ERR(pSegment)));
1725 return;
1726 }
1727
1728 for (; pSegment; pSegment = pNext)
1729 {
1730 Log3(("vboxNetFltLinuxForwardToIntNet: segment len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1731 pSegment->len, pSegment->data_len, pSegment->truesize, pSegment->next, skb_shinfo(pSegment)->nr_frags, skb_shinfo(pSegment)->gso_size, skb_shinfo(pSegment)->gso_segs, skb_shinfo(pSegment)->gso_type, skb_shinfo(pSegment)->frag_list, pSegment->pkt_type));
1732 pNext = pSegment->next;
1733 pSegment->next = 0;
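                /* Unlink the segment before forwarding it; vboxNetFltLinuxForwardSegment() consumes (frees) it. */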
1734 vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
1735 }
1736 dev_kfree_skb(pBuf);
1737 }
1738 }
1739 else
1740#endif /* VBOXNETFLT_WITH_GSO */
1741 {
1742 if (pBuf->ip_summed == CHECKSUM_PARTIAL && pBuf->pkt_type == PACKET_OUTGOING)
1743 {
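            /* The host stack deferred the checksum to the NIC (CHECKSUM_PARTIAL),
               but the copy forwarded to the internal network never reaches the NIC,
               so complete the checksum in software first. */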
1744#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1745 /*
1746 * Try to work around the problem with CentOS 4.7 and 5.2 (2.6.9
1747 * and 2.6.18 kernels), they pass wrong 'h' pointer down. We take IP
1748 * header length from the header itself and reconstruct 'h' pointer
1749 * to TCP (or whatever) header.
1750 */
1751 unsigned char *tmp = pBuf->h.raw;
1752 if (pBuf->h.raw == pBuf->nh.raw && pBuf->protocol == htons(ETH_P_IP))
1753 pBuf->h.raw = pBuf->nh.raw + pBuf->nh.iph->ihl * 4;
1754#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1755 if (VBOX_SKB_CHECKSUM_HELP(pBuf))
1756 {
1757 LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
1758 dev_kfree_skb(pBuf);
1759 return;
1760 }
1761#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1762 /* Restore the original (wrong) pointer. */
1763 pBuf->h.raw = tmp;
1764#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1765 }
1766 vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
1767 }
1768}
1769
1770#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1771/**
1772 * Work queue handler that forwards the socket buffers queued by
1773 * vboxNetFltLinuxPacketHandler to the internal network.
1774 *
1775 * @param pWork The work queue.
1776 */
1777# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1778static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
1779# else
1780static void vboxNetFltLinuxXmitTask(void *pWork)
1781# endif
1782{
1783 PVBOXNETFLTINS pThis = VBOX_FLT_XT_TO_INST(pWork);
1784 struct sk_buff *pBuf;
1785
1786 Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
1787
1788 /*
1789 * Active? Retain the instance and increment the busy counter.
1790 */
1791 if (vboxNetFltTryRetainBusyActive(pThis))
1792 {
1793 while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != NULL)
1794 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1795
1796 vboxNetFltRelease(pThis, true /* fBusy */);
1797 }
1798 else
1799 {
1800 /** @todo Shouldn't we just drop the packets here? There is little point in
1801 * making them accumulate when the VM is paused and it'll only waste
1802 * kernel memory anyway... Hmm, maybe wait a short while (2-5 secs)
1803 * before we start draining the packets (goes for the intnet ring
1804 * buffer too)? */
1805 }
1806}
1807#endif /* !VBOXNETFLT_LINUX_NO_XMIT_QUEUE */
1808
1809/**
1810 * Reports the GSO capabilities of the hardware NIC.
1811 *
1812 * @param pThis The net filter instance. The caller holds a
1813 * reference to this.
1814 */
1815static void vboxNetFltLinuxReportNicGsoCapabilities(PVBOXNETFLTINS pThis)
1816{
1817#ifdef VBOXNETFLT_WITH_GSO_XMIT_WIRE
1818 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
1819 {
1820 struct net_device *pDev;
1821 PINTNETTRUNKSWPORT pSwitchPort;
1822 unsigned int fFeatures;
1823 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1824
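        /* Sample the device feature flags under the spinlock so we do not race
           the unregistration notifier clearing pThis->u.s.pDev. */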
1825 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1826
1827 pSwitchPort = pThis->pSwitchPort; /* this doesn't need to be here, but it does no harm. */
1828 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1829 if (pDev)
1830 fFeatures = pDev->features;
1831 else
1832 fFeatures = 0;
1833
1834 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1835
1836 if (pThis->pSwitchPort)
1837 {
1838 /* Set/update the GSO capabilities of the NIC. */
1839 uint32_t fGsoCapabilites = 0;
1840 if (fFeatures & NETIF_F_TSO)
1841 fGsoCapabilites |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP);
1842 if (fFeatures & NETIF_F_TSO6)
1843 fGsoCapabilites |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP);
1844# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
1845 if (fFeatures & NETIF_F_UFO)
1846 fGsoCapabilites |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP);
1847 if (fFeatures & NETIF_F_UFO)
1848 fGsoCapabilites |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP);
1849# endif
1850 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort, fGsoCapabilites, INTNETTRUNKDIR_WIRE);
1851 }
1852
1853 vboxNetFltRelease(pThis, true /*fBusy*/);
1854 }
1855#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE */
1856}
1857
1858/**
1859 * Helper that determines whether the host (ignoring us) is operating the
1860 * interface in promiscuous mode or not.
1861 */
1862static bool vboxNetFltLinuxPromiscuous(PVBOXNETFLTINS pThis)
1863{
1864 bool fRc = false;
1865 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
1866 if (pDev)
1867 {
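        /* Subtract our own promiscuity reference (if we hold one) so we report
           whether anyone else has put the interface into promiscuous mode. */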
1868 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
1869 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
1870 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
1871 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1872 }
1873 return fRc;
1874}
1875
1876/**
1877 * Internal worker for vboxNetFltLinuxNotifierCallback.
1878 *
1879 * @returns VBox status code.
1880 * @param pThis The instance.
1881 * @param pDev The net device to attach to.
1883 */
1884static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
1885{
1886 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1887 LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));
1888
1889 /*
1890 * Retain and store the device.
1891 */
1892 dev_hold(pDev);
1893
1894 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1895 ASMAtomicUoWritePtr(&pThis->u.s.pDev, pDev);
1896 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1897
1898 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1899 Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
1900
1901 /* Get the mac address while we still have a valid net_device reference. */
1902 memcpy(&pThis->u.s.MacAddr, pDev->dev_addr, sizeof(pThis->u.s.MacAddr));
1903
1904 /*
1905 * Install a packet filter for this device with a protocol wildcard (ETH_P_ALL).
1906 */
1907 pThis->u.s.PacketType.type = __constant_htons(ETH_P_ALL);
1908 pThis->u.s.PacketType.dev = pDev;
1909 pThis->u.s.PacketType.func = vboxNetFltLinuxPacketHandler;
1910 dev_add_pack(&pThis->u.s.PacketType);
1911
1912#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
1913 vboxNetFltLinuxHookDev(pThis, pDev);
1914#endif
1915#ifdef VBOXNETFLT_WITH_QDISC
1916 vboxNetFltLinuxQdiscInstall(pThis, pDev);
1917#endif /* VBOXNETFLT_WITH_QDISC */
1918
1919 /*
1920 * Set the indicators that require the spinlock. Be a bit paranoid about
1921 * racing the device notification handler.
1922 */
1923 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1924 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1925 if (pDev)
1926 {
1927 ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
1928 ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
1929 pDev = NULL; /* don't dereference it */
1930 }
1931 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1932 Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));
1933
1934 /*
1935 * If the above succeeded, report the GSO capabilities; if not, undo and
1936 * release the device.
1937 */
1938 if (!pDev)
1939 {
1940 Assert(pThis->pSwitchPort);
1941 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
1942 {
1943 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
1944 pThis->pSwitchPort->pfnReportMacAddress(pThis->pSwitchPort, &pThis->u.s.MacAddr);
1945 pThis->pSwitchPort->pfnReportPromiscuousMode(pThis->pSwitchPort, vboxNetFltLinuxPromiscuous(pThis));
1946 pThis->pSwitchPort->pfnReportNoPreemptDsts(pThis->pSwitchPort, INTNETTRUNKDIR_WIRE | INTNETTRUNKDIR_HOST);
1947 vboxNetFltRelease(pThis, true /*fBusy*/);
1948 }
1949 }
1950 else
1951 {
1952#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
1953 vboxNetFltLinuxUnhookDev(pThis, pDev);
1954#endif
1955#ifdef VBOXNETFLT_WITH_QDISC
1956 vboxNetFltLinuxQdiscRemove(pThis, pDev);
1957#endif /* VBOXNETFLT_WITH_QDISC */
1958 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1959 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
1960 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1961 dev_put(pDev);
1962 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1963 }
1964
1965 LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.MacAddr), &pThis->u.s.MacAddr));
1966 return VINF_SUCCESS;
1967}
1968
1969
1970static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
1971{
1972 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1973
1974 Assert(!pThis->fDisconnectedFromHost);
1975
1976#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
1977 vboxNetFltLinuxUnhookDev(pThis, pDev);
1978#endif
1979#ifdef VBOXNETFLT_WITH_QDISC
1980 vboxNetFltLinuxQdiscRemove(pThis, pDev);
1981#endif /* VBOXNETFLT_WITH_QDISC */
1982
1983 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1984 ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
1985 ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
1986 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
1987 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1988
1989 dev_remove_pack(&pThis->u.s.PacketType);
1990#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1991 skb_queue_purge(&pThis->u.s.XmitQueue);
1992#endif
1993 Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
1994 Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1995 dev_put(pDev);
1996
1997 return NOTIFY_OK;
1998}
1999
2000static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
2001{
2002 /* Check if we are not suspended and promiscuous mode has not been set. */
2003 if ( pThis->enmTrunkState == INTNETTRUNKIFSTATE_ACTIVE
2004 && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2005 {
2006 /* Note that there is no need for locking as the kernel already holds the necessary lock. */
2007 dev_set_promiscuity(pDev, 1);
2008 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
2009 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2010 }
2011 else
2012 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2013 return NOTIFY_OK;
2014}
2015
2016static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
2017{
2018 /* Undo promiscuous mode if we have set it. */
2019 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2020 {
2021 /* Note that there is no need for locking as the kernel already holds the necessary lock. */
2022 dev_set_promiscuity(pDev, -1);
2023 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
2024 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2025 }
2026 else
2027 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2028 return NOTIFY_OK;
2029}
2030
2031#ifdef LOG_ENABLED
2032/** Stringify the NETDEV_XXX constants. */
2033static const char *vboxNetFltLinuxGetNetDevEventName(unsigned long ulEventType)
2034{
2035 const char *pszEvent = "NETDEV_<unknown>";
2036 switch (ulEventType)
2037 {
2038 case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
2039 case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
2040 case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
2041 case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
2042 case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
2043 case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
2044 case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
2045 case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
2046 case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
2047 case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
2048# ifdef NETDEV_FEAT_CHANGE
2049 case NETDEV_FEAT_CHANGE: pszEvent = "NETDEV_FEAT_CHANGE"; break;
2050# endif
2051 }
2052 return pszEvent;
2053}
2054#endif /* LOG_ENABLED */
2055
2056/**
2057 * Callback for listening to netdevice events.
2058 *
2059 * This handles rediscovery, cleanup on unregistration, promiscuity on
2060 * up/down, and GSO feature changes from ethtool.
2061 *
2062 * @returns NOTIFY_OK
2063 * @param self Pointer to our notifier registration block.
2064 * @param ulEventType The event.
2065 * @param ptr Event specific, but it is usually the device it
2066 * relates to.
2067 */
2068static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
2069
2070{
2071 PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);
2072 struct net_device *pDev = (struct net_device *)ptr;
2073 int rc = NOTIFY_OK;
2074
2075 Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2076 vboxNetFltLinuxGetNetDevEventName(ulEventType), ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
2077 if ( ulEventType == NETDEV_REGISTER
2078 && !strcmp(pDev->name, pThis->szName))
2079 {
2080 vboxNetFltLinuxAttachToInterface(pThis, pDev);
2081 }
2082 else
2083 {
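        /* Only handle events for the device we are actually attached to. */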
2084 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2085 if (pDev == ptr)
2086 {
2087 switch (ulEventType)
2088 {
2089 case NETDEV_UNREGISTER:
2090 rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
2091 break;
2092 case NETDEV_UP:
2093 rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
2094 break;
2095 case NETDEV_GOING_DOWN:
2096 rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
2097 break;
2098 case NETDEV_CHANGENAME:
2099 break;
2100#ifdef NETDEV_FEAT_CHANGE
2101 case NETDEV_FEAT_CHANGE:
2102 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2103 break;
2104#endif
2105 }
2106 }
2107 }
2108
2109 return rc;
2110}
2111
2112bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
2113{
2114 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
2115}
2116
2117int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, void *pvIfData, PINTNETSG pSG, uint32_t fDst)
2118{
2119 struct net_device * pDev;
2120 int err;
2121 int rc = VINF_SUCCESS;
2122 NOREF(pvIfData);
2123
2124 LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));
2125
2126 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2127 if (pDev)
2128 {
2129 /*
2130 * Create a sk_buff for the gather list and push it onto the wire.
2131 */
2132 if (fDst & INTNETTRUNKDIR_WIRE)
2133 {
2134 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
2135 if (pBuf)
2136 {
2137 vboxNetFltDumpPacket(pSG, true, "wire", 1);
2138 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2139 Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
2140 err = dev_queue_xmit(pBuf);
2141 if (err)
2142 rc = RTErrConvertFromErrno(err);
2143 }
2144 else
2145 rc = VERR_NO_MEMORY;
2146 }
2147
2148 /*
2149 * Create a sk_buff for the gather list and push it onto the host stack.
2150 */
2151 if (fDst & INTNETTRUNKDIR_HOST)
2152 {
2153 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
2154 if (pBuf)
2155 {
2156 vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
2157 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2158 Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
2159 err = netif_rx_ni(pBuf);
2160 if (err)
2161 rc = RTErrConvertFromErrno(err);
2162 }
2163 else
2164 rc = VERR_NO_MEMORY;
2165 }
2166
2167 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2168 }
2169
2170 return rc;
2171}
2172
2173
2174void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
2175{
2176 struct net_device * pDev;
2177
2178 LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
2179 pThis, pThis->szName, fActive?"true":"false",
2180 pThis->fDisablePromiscuous?"true":"false"));
2181
2182 if (pThis->fDisablePromiscuous)
2183 return;
2184
2185 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2186 if (pDev)
2187 {
2188 /*
2189 * This API is a bit weird; the best reference is the code.
2190 *
2191 * Also, we have a bit of a race condition wrt the maintenance of the
2192 * host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
2193 */
2194#ifdef LOG_ENABLED
2195 u_int16_t fIf;
2196 unsigned const cPromiscBefore = pDev->promiscuity;
2197#endif
2198 if (fActive)
2199 {
2200 Assert(!pThis->u.s.fPromiscuousSet);
2201
2202 rtnl_lock();
2203 dev_set_promiscuity(pDev, 1);
2204 rtnl_unlock();
2205 pThis->u.s.fPromiscuousSet = true;
2206 Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2207 }
2208 else
2209 {
2210 if (pThis->u.s.fPromiscuousSet)
2211 {
2212 rtnl_lock();
2213 dev_set_promiscuity(pDev, -1);
2214 rtnl_unlock();
2215 Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2216 }
2217 pThis->u.s.fPromiscuousSet = false;
2218
2219#ifdef LOG_ENABLED
2220 fIf = dev_get_flags(pDev);
2221 Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, pDev->promiscuity));
2222#endif
2223 }
2224
2225 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2226 }
2227}
2228
2229
2230int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
2231{
2232 /* Nothing to do here. */
2233 return VINF_SUCCESS;
2234}
2235
2236
2237int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
2238{
2239 /*
2240 * Report the GSO capabilities of the host and device (if connected).
2241 * Note! No need to mark ourselves busy here.
2242 */
2243 /** @todo duplicate work here now? Attach */
2244#if defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
2245 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort,
2246 0
2247 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP)
2248 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP)
2249# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
2250 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP)
2251 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP)
2252# endif
2253 , INTNETTRUNKDIR_HOST);
2254
2255#endif
2256 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2257
2258 return VINF_SUCCESS;
2259}
2260
2261
2262void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
2263{
2264 struct net_device *pDev;
2265 bool fRegistered;
2266 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2267
2268#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2269 vboxNetFltLinuxUnhookDev(pThis, NULL);
2270#endif
2271#ifdef VBOXNETFLT_WITH_QDISC
2272 vboxNetFltLinuxQdiscRemove(pThis, NULL);
2273#endif /* VBOXNETFLT_WITH_QDISC */
2274
2275 /** @todo This code may race vboxNetFltLinuxUnregisterDevice (very very
2276 * unlikely, but none the less). Since it doesn't actually update the
2277 * state (just reads it), it is likely to panic in some interesting
2278 * ways. */
2279
2280 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2281 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2282 fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
2283 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2284
2285 if (fRegistered)
2286 {
2287 dev_remove_pack(&pThis->u.s.PacketType);
2288#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2289 skb_queue_purge(&pThis->u.s.XmitQueue);
2290#endif
2291 Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2292 Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
2293 dev_put(pDev);
2294 }
2295 Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
2296 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2297 module_put(THIS_MODULE);
2298}
2299
2300
2301int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
2302{
2303 int err;
2304 NOREF(pvContext);
2305
2306 pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
2307 err = register_netdevice_notifier(&pThis->u.s.Notifier);
2308 if (err)
2309 return VERR_INTNET_FLT_IF_FAILED;
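    /* register_netdevice_notifier() replays NETDEV_REGISTER for devices that
       already exist, so if the interface is present our notifier callback has
       attached to it by now and set fRegistered. */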
2310 if (!pThis->u.s.fRegistered)
2311 {
2312 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2313 LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
2314 return VERR_INTNET_FLT_IF_NOT_FOUND;
2315 }
2316
2317 Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
2318 if ( pThis->fDisconnectedFromHost
2319 || !try_module_get(THIS_MODULE))
2320 return VERR_INTNET_FLT_IF_FAILED;
2321
2322 return VINF_SUCCESS;
2323}
2324
2325int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
2326{
2327 /*
2328 * Init the linux specific members.
2329 */
2330 pThis->u.s.pDev = NULL;
2331 pThis->u.s.fRegistered = false;
2332 pThis->u.s.fPromiscuousSet = false;
2333 memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
2334#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2335 skb_queue_head_init(&pThis->u.s.XmitQueue);
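    /* The INIT_WORK() signature changed in 2.6.20: the handler now receives the
       work_struct pointer itself instead of a caller supplied data pointer. */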
2336# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
2337 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
2338# else
2339 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
2340# endif
2341#endif
2342
2343 return VINF_SUCCESS;
2344}
2345
2346
2347void vboxNetFltPortOsNotifyMacAddress(PVBOXNETFLTINS pThis, void *pvIfData, PCRTMAC pMac)
2348{
2349 NOREF(pThis); NOREF(pvIfData); NOREF(pMac);
2350}
2351
2352
2353int vboxNetFltPortOsConnectInterface(PVBOXNETFLTINS pThis, void *pvIf, void **pvIfData)
2354{
2355 /* Nothing to do */
2356 NOREF(pThis); NOREF(pvIf); NOREF(pvIfData);
2357 return VINF_SUCCESS;
2358}
2359
2360
2361int vboxNetFltPortOsDisconnectInterface(PVBOXNETFLTINS pThis, void *pvIfData)
2362{
2363 /* Nothing to do */
2364 NOREF(pThis); NOREF(pvIfData);
2365 return VINF_SUCCESS;
2366}
2367