VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@35554

Last change on this file since 35554 was 35554, checked in by vboxsync on 2011-01-14

netflt: hard_start_xmit override support for pre-2.6.29 kernels

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 87.2 KB
1/* $Id: VBoxNetFlt-linux.c 35554 2011-01-14 08:46:18Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
22#define VBOXNETFLT_LINUX_NO_XMIT_QUEUE
23#include "the-linux-kernel.h"
24#include "version-generated.h"
25#include "product-generated.h"
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/rtnetlink.h>
29#include <linux/miscdevice.h>
30#include <linux/ip.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <VBox/intnetinline.h>
35#include <VBox/vmm/pdmnetinline.h>
36#include <VBox/param.h>
37#include <iprt/alloca.h>
38#include <iprt/assert.h>
39#include <iprt/spinlock.h>
40#include <iprt/semaphore.h>
41#include <iprt/initterm.h>
42#include <iprt/process.h>
43#include <iprt/mem.h>
44#include <iprt/net.h>
45#include <iprt/log.h>
46#include <iprt/mp.h>
47#include <iprt/mem.h>
48#include <iprt/time.h>
49
50#define VBOXNETFLT_OS_SPECFIC 1
51#include "../VBoxNetFltInternal.h"
52
53#ifdef CONFIG_NET_SCHED
54# define VBOXNETFLT_WITH_QDISC /* Comment this out to disable qdisc support */
55# ifdef VBOXNETFLT_WITH_QDISC
56# include <net/pkt_sched.h>
57# endif /* VBOXNETFLT_WITH_QDISC */
58#endif
59
60
61/*******************************************************************************
62* Defined Constants And Macros *
63*******************************************************************************/
64#define VBOX_FLT_NB_TO_INST(pNB) RT_FROM_MEMBER(pNB, VBOXNETFLTINS, u.s.Notifier)
65#define VBOX_FLT_PT_TO_INST(pPT) RT_FROM_MEMBER(pPT, VBOXNETFLTINS, u.s.PacketType)
66#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
67# define VBOX_FLT_XT_TO_INST(pXT) RT_FROM_MEMBER(pXT, VBOXNETFLTINS, u.s.XmitTask)
68#endif
69
70#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
71# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
72# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
73#else
74# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
75# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
76#endif
77
78#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
79# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
80#else
81# define CHECKSUM_PARTIAL CHECKSUM_HW
82# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
83# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
84# else
85# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
86# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
87# else
88# define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
89# endif
91/* Versions prior to 2.6.10 use stats for both bstats and qstats */
91# define bstats stats
92# define qstats stats
93# endif
94#endif
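/*
 * Illustrative usage sketch (not part of the driver): the compat macro above is
 * what call sites use so they stay kernel-version agnostic. A caller that has to
 * hand a CHECKSUM_PARTIAL buffer to the wire would do something along the lines of
 *
 *     if (pSkb->ip_summed == CHECKSUM_PARTIAL && VBOX_SKB_CHECKSUM_HELP(pSkb))
 *     {
 *         dev_kfree_skb(pSkb);   (checksumming in software failed, drop the buffer)
 *         return;
 *     }
 *
 * where pSkb is assumed to be a valid sk_buff; the version-specific wrappers are
 * written so that a non-zero result means the checksum could not be completed.
 */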
95
96#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)
97static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
98{
99 kfree_skb(skb);
100 sch->stats.drops++;
101
102 return NET_XMIT_DROP;
103}
104#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) */
105
106#ifndef NET_IP_ALIGN
107# define NET_IP_ALIGN 2
108#endif
109
110#if 0
111/** Create scatter / gather segments for fragments. When not used, we will
112 * linearize the socket buffer before creating the internal networking SG. */
113# define VBOXNETFLT_SG_SUPPORT 1
114#endif
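/*
 * When VBOXNETFLT_SG_SUPPORT is not defined (the default, see the #if 0 above),
 * incoming buffers are linearized instead of being mapped fragment by fragment.
 * A rough sketch of that fallback, as done in vboxNetFltLinuxPacketHandler below:
 *
 *     struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);   (flat copy of the skb)
 *     kfree_skb(pBuf);
 *     if (!pCopy)
 *         return 0;                                          (drop on allocation failure)
 *     pBuf = pCopy;
 */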
115
116#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
117/** Indicates that the linux kernel may send us GSO frames. */
118# define VBOXNETFLT_WITH_GSO 1
119
120/** This enables or disables the transmitting of GSO frames from the internal
121 * network to the host. */
122# define VBOXNETFLT_WITH_GSO_XMIT_HOST 1
123
124# if 0 /** @todo This is currently disabled because it causes a performance loss of 5-10%. */
125/** This enables or disables the transmitting of GSO frames from the internal
126 * network to the wire. */
127# define VBOXNETFLT_WITH_GSO_XMIT_WIRE 1
128# endif
129
130/** This enables or disables the forwarding/flooding of GSO frames from the host
131 * to the internal network. */
132# define VBOXNETFLT_WITH_GSO_RECV 1
133
134#endif
135
136#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
137/** This enables or disables handling of GSO frames coming from the wire (GRO). */
138# define VBOXNETFLT_WITH_GRO 1
139#endif
140/*
141 * GRO support was backported to RHEL 5.4
142 */
143#ifdef RHEL_RELEASE_CODE
144# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)
145# define VBOXNETFLT_WITH_GRO 1
146# endif
147#endif
148
149/*******************************************************************************
150* Internal Functions *
151*******************************************************************************/
152static int VBoxNetFltLinuxInit(void);
153static void VBoxNetFltLinuxUnload(void);
154static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf);
155
156
157/*******************************************************************************
158* Global Variables *
159*******************************************************************************/
160/**
161 * The (common) global data.
162 */
163static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
164
165module_init(VBoxNetFltLinuxInit);
166module_exit(VBoxNetFltLinuxUnload);
167
168MODULE_AUTHOR(VBOX_VENDOR);
169MODULE_DESCRIPTION(VBOX_PRODUCT " Network Filter Driver");
170MODULE_LICENSE("GPL");
171#ifdef MODULE_VERSION
172MODULE_VERSION(VBOX_VERSION_STRING " (" RT_XSTR(INTNETTRUNKIFPORT_VERSION) ")");
173#endif
174
175
176#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) && defined(LOG_ENABLED)
177unsigned dev_get_flags(const struct net_device *dev)
178{
179 unsigned flags;
180
181 flags = (dev->flags & ~(IFF_PROMISC |
182 IFF_ALLMULTI |
183 IFF_RUNNING)) |
184 (dev->gflags & (IFF_PROMISC |
185 IFF_ALLMULTI));
186
187 if (netif_running(dev) && netif_carrier_ok(dev))
188 flags |= IFF_RUNNING;
189
190 return flags;
191}
192#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
193
194
195#ifdef VBOXNETFLT_WITH_QDISC
196//#define QDISC_LOG(x) printk x
197# define QDISC_LOG(x) do { } while (0)
198
199# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
200# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops)
201# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
202# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops, parent)
203# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
204# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, queue, ops, parent)
205# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) */
206# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(queue, ops, parent)
207# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) */
208
209# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
210# define qdisc_dev(qdisc) (qdisc->dev)
211# define qdisc_pkt_len(skb) (skb->len)
212# define QDISC_GET(dev) (dev->qdisc_sleeping)
213# else
214# define QDISC_GET(dev) (netdev_get_tx_queue(dev, 0)->qdisc_sleeping)
215# endif
216
217# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
218# define QDISC_SAVED_NUM(dev) 1
219# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
220# define QDISC_SAVED_NUM(dev) dev->num_tx_queues
221# else
222# define QDISC_SAVED_NUM(dev) dev->num_tx_queues+1
223# endif
224
225# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
226# define QDISC_IS_BUSY(dev, qdisc) test_bit(__LINK_STATE_SCHED, &dev->state)
227# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
228# define QDISC_IS_BUSY(dev, qdisc) (test_bit(__QDISC_STATE_RUNNING, &qdisc->state) || \
229 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
230# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */
231# define QDISC_IS_BUSY(dev, qdisc) (qdisc_is_running(qdisc) || \
232 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
233# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */
234
235struct VBoxNetQDiscPriv
236{
237 /** Pointer to the single child qdisc. */
238 struct Qdisc *pChild;
239 /*
240 * Technically it is possible to have different qdiscs for different TX
241 * queues so we have to save them all.
242 */
243 /** Pointer to the array of saved qdiscs. */
244 struct Qdisc **ppSaved;
245 /** Pointer to the net filter instance. */
246 PVBOXNETFLTINS pVBoxNetFlt;
247};
248typedef struct VBoxNetQDiscPriv *PVBOXNETQDISCPRIV;
249
250//#define VBOXNETFLT_QDISC_ENQUEUE
251static int vboxNetFltQdiscEnqueue(struct sk_buff *skb, struct Qdisc *sch)
252{
253 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
254 int rc;
255
256# ifdef VBOXNETFLT_QDISC_ENQUEUE
257 if (VALID_PTR(pPriv->pVBoxNetFlt))
258 {
259 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
260 PCRTNETETHERHDR pEtherHdr;
261 PINTNETTRUNKSWPORT pSwitchPort;
262 uint32_t cbHdrs = skb_headlen(skb);
263
264 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
265 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(skb, 0, cbHdrs, &abHdrBuf[0]);
266 if ( pEtherHdr
267 && (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) != NULL
268 && VALID_PTR(pSwitchPort)
269 && cbHdrs >= 6)
270 {
271 /** @todo consider reference counting, etc. */
272 INTNETSWDECISION enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
273 if (enmDecision == INTNETSWDECISION_INTNET)
274 {
275 struct sk_buff *pBuf = skb_copy(skb, GFP_ATOMIC);
276 pBuf->pkt_type = PACKET_OUTGOING;
277 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
278 qdisc_drop(skb, sch);
279 ++sch->bstats.packets;
280 sch->bstats.bytes += qdisc_pkt_len(skb);
281 return NET_XMIT_SUCCESS;
282 }
283 }
284 }
285# endif /* VBOXNETFLT_QDISC_ENQUEUE */
286 rc = pPriv->pChild->enqueue(skb, pPriv->pChild);
287 if (rc == NET_XMIT_SUCCESS)
288 {
289 ++sch->q.qlen;
290 ++sch->bstats.packets;
291 sch->bstats.bytes += qdisc_pkt_len(skb);
292 }
293 else
294 ++sch->qstats.drops;
295 return rc;
296}
297
298static struct sk_buff *vboxNetFltQdiscDequeue(struct Qdisc *sch)
299{
300 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
301# ifdef VBOXNETFLT_QDISC_ENQUEUE
302 --sch->q.qlen;
303 return pPriv->pChild->dequeue(pPriv->pChild);
304# else /* VBOXNETFLT_QDISC_ENQUEUE */
305 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
306 PCRTNETETHERHDR pEtherHdr;
307 PINTNETTRUNKSWPORT pSwitchPort;
308 struct sk_buff *pSkb;
309
310 QDISC_LOG(("vboxNetFltDequeue: Enter pThis=%p\n", pPriv->pVBoxNetFlt));
311
312 while ((pSkb = pPriv->pChild->dequeue(pPriv->pChild)) != NULL)
313 {
314 struct sk_buff *pBuf;
315 INTNETSWDECISION enmDecision;
316 uint32_t cbHdrs;
317
318 --sch->q.qlen;
319
320 if (!VALID_PTR(pPriv->pVBoxNetFlt))
321 break;
322
323 cbHdrs = skb_headlen(pSkb);
324 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
325 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
326 if ( !pEtherHdr
327 || (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) == NULL
328 || !VALID_PTR(pSwitchPort)
329 || cbHdrs < 6)
330 break;
331
332 /** @todo consider reference counting, etc. */
333 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
334 if (enmDecision != INTNETSWDECISION_INTNET)
335 break;
336
337 pBuf = skb_copy(pSkb, GFP_ATOMIC);
338 pBuf->pkt_type = PACKET_OUTGOING;
339 QDISC_LOG(("vboxNetFltDequeue: pThis=%p\n", pPriv->pVBoxNetFlt));
340 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
341 qdisc_drop(pSkb, sch);
342 QDISC_LOG(("VBoxNetFlt: Packet for %02x:%02x:%02x:%02x:%02x:%02x dropped\n",
343 pSkb->data[0], pSkb->data[1], pSkb->data[2],
344 pSkb->data[3], pSkb->data[4], pSkb->data[5]));
345 }
346
347 return pSkb;
348# endif /* VBOXNETFLT_QDISC_ENQUEUE */
349}
350
351# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
352static int vboxNetFltQdiscRequeue(struct sk_buff *skb, struct Qdisc *sch)
353{
354 int rc;
355 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
356
357 rc = pPriv->pChild->ops->requeue(skb, pPriv->pChild);
358 if (rc == 0)
359 {
360 sch->q.qlen++;
361# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
362 sch->qstats.requeues++;
363# endif
364 }
365
366 return rc;
367}
368# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
369
370static unsigned int vboxNetFltQdiscDrop(struct Qdisc *sch)
371{
372 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
373 unsigned int cbLen;
374
375 if (pPriv->pChild->ops->drop)
376 {
377 cbLen = pPriv->pChild->ops->drop(pPriv->pChild);
378 if (cbLen != 0)
379 {
380 ++sch->qstats.drops;
381 --sch->q.qlen;
382 return cbLen;
383 }
384 }
385
386 return 0;
387}
388
389# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
390static int vboxNetFltQdiscInit(struct Qdisc *sch, struct rtattr *opt)
391# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
392static int vboxNetFltQdiscInit(struct Qdisc *sch, struct nlattr *opt)
393# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
394{
395 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
396 struct net_device *pDev = qdisc_dev(sch);
397
398 pPriv->pVBoxNetFlt = NULL;
399
400 pPriv->ppSaved = kcalloc(QDISC_SAVED_NUM(pDev), sizeof(pPriv->ppSaved[0]),
401 GFP_KERNEL);
402 if (!pPriv->ppSaved)
403 return -ENOMEM;
404
405 pPriv->pChild = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
406 &pfifo_qdisc_ops,
407 TC_H_MAKE(TC_H_MAJ(sch->handle),
408 TC_H_MIN(1)));
409 if (!pPriv->pChild)
410 {
411 kfree(pPriv->ppSaved);
412 pPriv->ppSaved = NULL;
413 return -ENOMEM;
414 }
415
416 return 0;
417}
418
419static void vboxNetFltQdiscReset(struct Qdisc *sch)
420{
421 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
422
423 qdisc_reset(pPriv->pChild);
424 sch->q.qlen = 0;
425 sch->qstats.backlog = 0;
426}
427
428static void vboxNetFltQdiscDestroy(struct Qdisc* sch)
429{
430 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
431 struct net_device *pDev = qdisc_dev(sch);
432
433 qdisc_destroy(pPriv->pChild);
434 pPriv->pChild = NULL;
435
436 if (pPriv->ppSaved)
437 {
438 int i;
439 for (i = 0; i < QDISC_SAVED_NUM(pDev); i++)
440 if (pPriv->ppSaved[i])
441 qdisc_destroy(pPriv->ppSaved[i]);
442 kfree(pPriv->ppSaved);
443 pPriv->ppSaved = NULL;
444 }
445}
446
447static int vboxNetFltClassGraft(struct Qdisc *sch, unsigned long arg, struct Qdisc *pNew,
448 struct Qdisc **ppOld)
449{
450 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
451
452 if (pNew == NULL)
453 pNew = &noop_qdisc;
454
455 sch_tree_lock(sch);
456 *ppOld = pPriv->pChild;
457 pPriv->pChild = pNew;
458# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
459 sch->q.qlen = 0;
460# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
461 qdisc_tree_decrease_qlen(*ppOld, (*ppOld)->q.qlen);
462# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
463 qdisc_reset(*ppOld);
464 sch_tree_unlock(sch);
465
466 return 0;
467}
468
469static struct Qdisc *vboxNetFltClassLeaf(struct Qdisc *sch, unsigned long arg)
470{
471 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
472 return pPriv->pChild;
473}
474
475static unsigned long vboxNetFltClassGet(struct Qdisc *sch, u32 classid)
476{
477 return 1;
478}
479
480static void vboxNetFltClassPut(struct Qdisc *sch, unsigned long arg)
481{
482}
483
484# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
485static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
486 struct rtattr **tca, unsigned long *arg)
487# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
488static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
489 struct nlattr **tca, unsigned long *arg)
490# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
491{
492 return -ENOSYS;
493}
494
495static int vboxNetFltClassDelete(struct Qdisc *sch, unsigned long arg)
496{
497 return -ENOSYS;
498}
499
500static void vboxNetFltClassWalk(struct Qdisc *sch, struct qdisc_walker *walker)
501{
502 if (!walker->stop) {
503 if (walker->count >= walker->skip)
504 if (walker->fn(sch, 1, walker) < 0) {
505 walker->stop = 1;
506 return;
507 }
508 walker->count++;
509 }
510}
511
512# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
513static struct tcf_proto **vboxNetFltClassFindTcf(struct Qdisc *sch, unsigned long cl)
514{
515 return NULL;
516}
517# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) */
518
519static int vboxNetFltClassDump(struct Qdisc *sch, unsigned long cl,
520 struct sk_buff *skb, struct tcmsg *tcm)
521{
522 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
523
524 if (cl != 1)
525 return -ENOENT;
526
527 tcm->tcm_handle |= TC_H_MIN(1);
528 tcm->tcm_info = pPriv->pChild->handle;
529
530 return 0;
531}
532
533
534static struct Qdisc_class_ops g_VBoxNetFltClassOps =
535{
536 .graft = vboxNetFltClassGraft,
537 .leaf = vboxNetFltClassLeaf,
538 .get = vboxNetFltClassGet,
539 .put = vboxNetFltClassPut,
540 .change = vboxNetFltClassChange,
541 .delete = vboxNetFltClassDelete,
542 .walk = vboxNetFltClassWalk,
543# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
544 .tcf_chain = vboxNetFltClassFindTcf,
545# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) */
546 .dump = vboxNetFltClassDump,
547};
548
549
550static struct Qdisc_ops g_VBoxNetFltQDiscOps = {
551 .cl_ops = &g_VBoxNetFltClassOps,
552 .id = "vboxnetflt",
553 .priv_size = sizeof(struct VBoxNetQDiscPriv),
554 .enqueue = vboxNetFltQdiscEnqueue,
555 .dequeue = vboxNetFltQdiscDequeue,
556# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
557 .requeue = vboxNetFltQdiscRequeue,
558# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
559 .peek = qdisc_peek_dequeued,
560# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
561 .drop = vboxNetFltQdiscDrop,
562 .init = vboxNetFltQdiscInit,
563 .reset = vboxNetFltQdiscReset,
564 .destroy = vboxNetFltQdiscDestroy,
565 .owner = THIS_MODULE
566};
567
568/*
569 * If our qdisc is already attached to the device (that means the user
570 * installed it from command line with 'tc' command) we simply update
571 * the pointer to vboxnetflt instance in qdisc's private structure.
572 * Otherwise we need to take some additional steps:
573 * - Create our qdisc;
574 * - Save all references to qdiscs;
575 * - Replace our child with the first qdisc reference;
576 * - Replace all references so they point to our qdisc.
577 */
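/*
 * For reference only (an assumption about typical usage, not something this code
 * issues itself): the "already attached" case handled below corresponds to an
 * administrator pre-installing the qdisc by hand with something like
 *
 *     tc qdisc add dev eth0 root vboxnetflt
 *
 * where eth0 is just an example interface. In that case the code merely stores
 * the pointer to the vboxnetflt instance in the qdisc's private data.
 */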
578static void vboxNetFltLinuxQdiscInstall(PVBOXNETFLTINS pThis, struct net_device *pDev)
579{
580# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
581 int i;
582# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
583 PVBOXNETQDISCPRIV pPriv;
584
585 struct Qdisc *pExisting = QDISC_GET(pDev);
586 /* Do not install our qdisc for devices with no TX queues */
587 if (!pExisting->enqueue)
588 return;
589 if (strcmp(pExisting->ops->id, "vboxnetflt"))
590 {
591 /* The existing qdisc is different from ours, let's create new one. */
592 struct Qdisc *pNew = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
593 &g_VBoxNetFltQDiscOps, TC_H_ROOT);
594 if (!pNew)
595 return; // TODO: Error?
596
597 if (!try_module_get(THIS_MODULE))
598 {
599 /*
600 * This may cause a memory leak but calling qdisc_destroy()
601 * is not an option as it will call module_put().
602 */
603 return;
604 }
605 pPriv = qdisc_priv(pNew);
606
607 qdisc_destroy(pPriv->pChild);
608 pPriv->pChild = QDISC_GET(pDev);
609 atomic_inc(&pPriv->pChild->refcnt);
610 /*
611 * There is no need to deactivate the device or acquire any locks prior
612 * to changing qdiscs since we do not destroy the old qdisc.
613 * Atomic replacement of pointers is enough.
614 */
615 /*
616 * No need to change reference counters here as we merely move
617 * the pointer and the reference counter of the newly allocated
618 * qdisc is already 1.
619 */
620# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
621 pPriv->ppSaved[0] = pDev->qdisc_sleeping;
622 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pNew);
623 ASMAtomicWritePtr(&pDev->qdisc, pNew);
624# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
625 for (i = 0; i < pDev->num_tx_queues; i++)
626 {
627 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
628
629 pPriv->ppSaved[i] = pQueue->qdisc_sleeping;
630 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pNew);
631 ASMAtomicWritePtr(&pQueue->qdisc, pNew);
632 if (i)
633 atomic_inc(&pNew->refcnt);
634 }
635 /* Newer kernels store root qdisc in netdev structure as well. */
636# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
637 pPriv->ppSaved[pDev->num_tx_queues] = pDev->qdisc;
638 ASMAtomicWritePtr(&pDev->qdisc, pNew);
639 atomic_inc(&pNew->refcnt);
640# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
641# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
642 /* Sync the queue len with our child */
643 pNew->q.qlen = pPriv->pChild->q.qlen;
644 }
645 else
646 {
647 /* We already have vboxnetflt qdisc, let's use it. */
648 pPriv = qdisc_priv(pExisting);
649 }
650 ASMAtomicWritePtr(&pPriv->pVBoxNetFlt, pThis);
651 QDISC_LOG(("vboxNetFltLinuxInstallQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
652}
653
654static void vboxNetFltLinuxQdiscRemove(PVBOXNETFLTINS pThis, struct net_device *pDev)
655{
656# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
657 int i;
658# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
659 PVBOXNETQDISCPRIV pPriv;
660 struct Qdisc *pQdisc, *pChild;
661 if (!pDev)
662 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
663 if (!VALID_PTR(pDev))
664 {
665 printk("VBoxNetFlt: Failed to detach qdisc, invalid device pointer: %p\n",
666 pDev);
667 return; // TODO: Consider returning an error
668 }
669
670
671 pQdisc = QDISC_GET(pDev);
672 if (strcmp(pQdisc->ops->id, "vboxnetflt"))
673 {
674 if (pQdisc->enqueue)
675 {
676 /* Looks like the user has replaced our qdisc manually. */
677 printk("VBoxNetFlt: Failed to detach qdisc, wrong qdisc: %s\n",
678 pQdisc->ops->id);
679 }
680 return; // TODO: Consider returning an error
681 }
682
683 pPriv = qdisc_priv(pQdisc);
684 Assert(pPriv->pVBoxNetFlt == pThis);
685 ASMAtomicWriteNullPtr(&pPriv->pVBoxNetFlt);
686 pChild = ASMAtomicXchgPtrT(&pPriv->pChild, &noop_qdisc, struct Qdisc *);
687 qdisc_destroy(pChild); /* It won't be the last reference. */
688
689 QDISC_LOG(("vboxNetFltLinuxQdiscRemove: refcnt=%d num_tx_queues=%d\n",
690 atomic_read(&pQdisc->refcnt), pDev->num_tx_queues));
691# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
692 /* Play it safe, make sure the qdisc is not being used. */
693 if (pPriv->ppSaved[0])
694 {
695 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pPriv->ppSaved[0]);
696 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[0]);
697 pPriv->ppSaved[0] = NULL;
698 while (QDISC_IS_BUSY(pDev, pQdisc))
699 yield();
700 qdisc_destroy(pQdisc); /* Destroy reference */
701 }
702# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
703 for (i = 0; i < pDev->num_tx_queues; i++)
704 {
705 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
706 if (pPriv->ppSaved[i])
707 {
708 Assert(pQueue->qdisc_sleeping == pQdisc);
709 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pPriv->ppSaved[i]);
710 ASMAtomicWritePtr(&pQueue->qdisc, pPriv->ppSaved[i]);
711 pPriv->ppSaved[i] = NULL;
712 while (QDISC_IS_BUSY(pDev, pQdisc))
713 yield();
714 qdisc_destroy(pQdisc); /* Destroy reference */
715 }
716 }
717 /* Newer kernels store root qdisc in netdev structure as well. */
718# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
719 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[pDev->num_tx_queues]);
720 pPriv->ppSaved[pDev->num_tx_queues] = NULL;
721 while (QDISC_IS_BUSY(pDev, pQdisc))
722 yield();
723 qdisc_destroy(pQdisc); /* Destroy reference */
724# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
725# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
726
727 /*
728 * At this point all references to our qdisc should be gone
729 * unless the user had installed it manually.
730 */
731 QDISC_LOG(("vboxNetFltLinuxRemoveQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
732}
733
734#endif /* VBOXNETFLT_WITH_QDISC */
735
736
737/**
738 * Initialize module.
739 *
740 * @returns appropriate status code.
741 */
742static int __init VBoxNetFltLinuxInit(void)
743{
744 int rc;
745 /*
746 * Initialize IPRT.
747 */
748 rc = RTR0Init(0);
749 if (RT_SUCCESS(rc))
750 {
751 Log(("VBoxNetFltLinuxInit\n"));
752
753 /*
754 * Initialize the globals and connect to the support driver.
755 *
756 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
757 * for establishing the connection to the support driver.
758 */
759 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
760 rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
761 if (RT_SUCCESS(rc))
762 {
763#ifdef VBOXNETFLT_WITH_QDISC
764 /*memcpy(&g_VBoxNetFltQDiscOps, &pfifo_qdisc_ops, sizeof(g_VBoxNetFltQDiscOps));
765 strcpy(g_VBoxNetFltQDiscOps.id, "vboxnetflt");
766 g_VBoxNetFltQDiscOps.owner = THIS_MODULE;*/
767 rc = register_qdisc(&g_VBoxNetFltQDiscOps);
768 if (rc)
769 {
770 LogRel(("VBoxNetFlt: Failed to register qdisc: %d\n", rc));
771 return rc;
772 }
773#endif /* VBOXNETFLT_WITH_QDISC */
774 LogRel(("VBoxNetFlt: Successfully started.\n"));
775 return 0;
776 }
777
778 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
779 RTR0Term();
780 }
781 else
782 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
783
784 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
785 return -RTErrConvertToErrno(rc);
786}
787
788
789/**
790 * Unload the module.
791 *
792 * @todo We have to prevent this if we're busy!
793 */
794static void __exit VBoxNetFltLinuxUnload(void)
795{
796 int rc;
797 Log(("VBoxNetFltLinuxUnload\n"));
798 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
799
800#ifdef VBOXNETFLT_WITH_QDISC
801 unregister_qdisc(&g_VBoxNetFltQDiscOps);
802#endif /* VBOXNETFLT_WITH_QDISC */
803 /*
804 * Undo the work done during start (in reverse order).
805 */
806 rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
807 AssertRC(rc); NOREF(rc);
808
809 RTR0Term();
810
811 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
812
813 Log(("VBoxNetFltLinuxUnload - done\n"));
814}
815
816
817/**
818 * Experiment where we filter traffic from the host to the internal network
819 * before it reaches the NIC driver.
820 *
821 * The current code uses a very ugly hack and only works on kernels using the
822 * net_device_ops (>= 2.6.29). It has been shown to give us a
823 * performance boost of 60-100% though. So, we have to find some less hacky way
824 * of getting this job done eventually.
825 *
826 * #define VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
827 */
828#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
829
830# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
831
832# include <linux/ethtool.h>
833
834typedef struct ethtool_ops OVR_OPSTYPE;
835# define OVR_OPS ethtool_ops
836# define OVR_XMIT pfnStartXmit
837
838# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
839
840typedef struct net_device_ops OVR_OPSTYPE;
841# define OVR_OPS netdev_ops
842# define OVR_XMIT pOrgOps->ndo_start_xmit
843
844# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
845
846/**
847 * The overridden net_device_ops of the device we're attached to.
848 *
849 * As there is no net_device_ops structure in pre-2.6.29 kernels we override
850 * ethtool_ops instead along with hard_start_xmit callback in net_device
851 * structure.
852 *
853 * This is a very dirty hack that was created to explore how much we can improve
854 * the host to guest transfers by not CC'ing the NIC. It turns out to be
855 * the only way to filter outgoing packets for devices without TX queue.
856 */
857typedef struct VBoxNetDeviceOpsOverride
858{
859 /** Our overridden ops. */
860 OVR_OPSTYPE Ops;
861 /** Magic word. */
862 uint32_t u32Magic;
863 /** Pointer to the original ops. */
864 OVR_OPSTYPE const *pOrgOps;
865# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
866 /** Pointer to the original hard_start_xmit function. */
867 int (*pfnStartXmit)(struct sk_buff *pSkb, struct net_device *pDev);
868# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
869 /** Pointer to the net filter instance. */
870 PVBOXNETFLTINS pVBoxNetFlt;
871 /** The number of filtered packets. */
872 uint64_t cFiltered;
873 /** The total number of packets */
874 uint64_t cTotal;
875} VBOXNETDEVICEOPSOVERRIDE, *PVBOXNETDEVICEOPSOVERRIDE;
876/** VBOXNETDEVICEOPSOVERRIDE::u32Magic value. */
877#define VBOXNETDEVICEOPSOVERRIDE_MAGIC UINT32_C(0x00c0ffee)
878
879/**
880 * ndo_start_xmit wrapper that drops packets that shouldn't go to the wire
881 * because they belong on the internal network.
882 *
883 * @returns NETDEV_TX_XXX.
884 * @param pSkb The socket buffer to transmit.
885 * @param pDev The net device.
886 */
887static int vboxNetFltLinuxStartXmitFilter(struct sk_buff *pSkb, struct net_device *pDev)
888{
889 PVBOXNETDEVICEOPSOVERRIDE pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->OVR_OPS;
890 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
891 PCRTNETETHERHDR pEtherHdr;
892 PINTNETTRUNKSWPORT pSwitchPort;
893 uint32_t cbHdrs;
894
895
896 /*
897 * Validate the override structure.
898 *
899 * Note! We're racing vboxNetFltLinuxUnhookDev here. If this was supposed
900 * to be production quality code, we would have to be much more
901 * careful here and avoid the race.
902 */
903 if ( !VALID_PTR(pOverride)
904 || pOverride->u32Magic != VBOXNETDEVICEOPSOVERRIDE_MAGIC
905 || !VALID_PTR(pOverride->pOrgOps))
906 {
907 printk("vboxNetFltLinuxStartXmitFilter: bad override %p\n", pOverride);
908 dev_kfree_skb(pSkb);
909 return NETDEV_TX_OK;
910 }
911 pOverride->cTotal++;
912
913 /*
914 * Do the filtering based on the default OUI of our virtual NICs.
915 *
916 * Note! In a real solution, we would ask the switch whether the
917 * destination MAC is guaranteed to be on the internal network and only
918 * then drop it.
919 */
920 cbHdrs = skb_headlen(pSkb);
921 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
922 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
923 if ( pEtherHdr
924 && VALID_PTR(pOverride->pVBoxNetFlt)
925 && (pSwitchPort = pOverride->pVBoxNetFlt->pSwitchPort) != NULL
926 && VALID_PTR(pSwitchPort)
927 && cbHdrs >= 6)
928 {
929 INTNETSWDECISION enmDecision;
930
931 /** @todo consider reference counting, etc. */
932 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
933 if (enmDecision == INTNETSWDECISION_INTNET)
934 {
935 dev_kfree_skb(pSkb);
936 pOverride->cFiltered++;
937 return NETDEV_TX_OK;
938 }
939 }
940
941 return pOverride->OVR_XMIT(pSkb, pDev);
942}
943
944/**
945 * Hooks the ndo_start_xmit operation (hard_start_xmit on pre-2.6.29 kernels) of the device.
946 *
947 * @param pThis The net filter instance.
948 * @param pDev The net device.
949 */
950static void vboxNetFltLinuxHookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
951{
952 PVBOXNETDEVICEOPSOVERRIDE pOverride;
953 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
954
955 pOverride = RTMemAlloc(sizeof(*pOverride));
956 if (!pOverride)
957 return;
958 pOverride->pOrgOps = pDev->OVR_OPS;
959 pOverride->Ops = *pDev->OVR_OPS;
960# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
961 pOverride->pfnStartXmit = pDev->hard_start_xmit;
962# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
963 pOverride->Ops.ndo_start_xmit = vboxNetFltLinuxStartXmitFilter;
964# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
965 pOverride->u32Magic = VBOXNETDEVICEOPSOVERRIDE_MAGIC;
966 pOverride->cTotal = 0;
967 pOverride->cFiltered = 0;
968 pOverride->pVBoxNetFlt = pThis;
969
970 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp); /* (this isn't necessary, but so what) */
971 ASMAtomicWritePtr((void * volatile *)&pDev->OVR_OPS, pOverride);
972# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
973 ASMAtomicXchgPtr((void * volatile *)&pDev->hard_start_xmit, vboxNetFltLinuxStartXmitFilter);
974# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
975 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
976}
977
978/**
979 * Undoes what vboxNetFltLinuxHookDev did.
980 *
981 * @param pThis The net filter instance.
982 * @param pDev The net device. Can be NULL, in which case
983 * we'll try retrieve it from @a pThis.
984 */
985static void vboxNetFltLinuxUnhookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
986{
987 PVBOXNETDEVICEOPSOVERRIDE pOverride;
988 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
989
990 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
991 if (!pDev)
992 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
993 if (VALID_PTR(pDev))
994 {
995 pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->OVR_OPS;
996 if ( VALID_PTR(pOverride)
997 && pOverride->u32Magic == VBOXNETDEVICEOPSOVERRIDE_MAGIC
998 && VALID_PTR(pOverride->pOrgOps)
999 )
1000 {
1001# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
1002 ASMAtomicWritePtr((void * volatile *)&pDev->hard_start_xmit, pOverride->pfnStartXmit);
1003# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
1004 ASMAtomicWritePtr((void * volatile *)&pDev->OVR_OPS, pOverride->pOrgOps);
1005 ASMAtomicWriteU32(&pOverride->u32Magic, 0);
1006 }
1007 else
1008 pOverride = NULL;
1009 }
1010 else
1011 pOverride = NULL;
1012 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1013
1014 if (pOverride)
1015 {
1016 printk("vboxnetflt: dropped %llu out of %llu packets\n", pOverride->cFiltered, pOverride->cTotal);
1017 RTMemFree(pOverride);
1018 }
1019}
1020
1021#endif /* VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT */
1022
1023
1024/**
1025 * Reads and retains the host interface handle.
1026 *
1027 * @returns The handle, NULL if detached.
1028 * @param pThis The net filter instance.
1029 */
1030DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
1031{
1032#if 0
1033 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1034 struct net_device *pDev = NULL;
1035
1036 Log(("vboxNetFltLinuxRetainNetDev\n"));
1037 /*
1038 * Be careful here to avoid problems racing the detached callback.
1039 */
1040 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1041 if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
1042 {
1043 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
1044 if (pDev)
1045 {
1046 dev_hold(pDev);
1047 Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n",
1048 pDev, pDev->name,
1049#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
1050 netdev_refcnt_read(pDev)
1051#else
1052 atomic_read(&pDev->refcnt)
1053#endif
1054 ));
1055 }
1056 }
1057 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1058
1059 Log(("vboxNetFltLinuxRetainNetDev - done\n"));
1060 return pDev;
1061#else
1062 return ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1063#endif
1064}
1065
1066
1067/**
1068 * Release the host interface handle previously retained
1069 * by vboxNetFltLinuxRetainNetDev.
1070 *
1071 * @param pThis The instance.
1072 * @param pDev The vboxNetFltLinuxRetainNetDev
1073 * return value, NULL is fine.
1074 */
1075DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
1076{
1077#if 0
1078 Log(("vboxNetFltLinuxReleaseNetDev\n"));
1079 NOREF(pThis);
1080 if (pDev)
1081 {
1082 dev_put(pDev);
1083 Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n",
1084 pDev, pDev->name,
1085#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
1086 netdev_refcnt_read(pDev)
1087#else
1088 atomic_read(&pDev->refcnt)
1089#endif
1090 ));
1091 }
1092 Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
1093#endif
1094}
1095
1096#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
1097#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))
1098
1099/**
1100 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
1101 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
1102 *
1103 * @returns true / false accordingly.
1104 * @param pBuf The sk_buff.
1105 */
1106DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
1107{
1108 return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
1109}
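/*
 * The producer side of the tag check above lives in vboxNetFltLinuxSkBufFromSG
 * further down: every sk_buff the driver itself injects is stamped before it is
 * handed to the stack, roughly
 *
 *     pPkt->dev = pDev;
 *     VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
 *
 * so that when the same buffer later hits our packet handler the tag matches the
 * device's ifindex and the frame is dropped instead of being looped back into
 * the internal network.
 */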
1110
1111
1112/**
1113 * Internal worker that creates a Linux sk_buff for a
1114 * (scatter/)gather list.
1115 *
1116 * @returns Pointer to the sk_buff.
1117 * @param pThis The instance.
1118 * @param pSG The (scatter/)gather list.
1119 * @param fDstWire Set if the destination is the wire.
1120 */
1121static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
1122{
1123 struct sk_buff *pPkt;
1124 struct net_device *pDev;
1125 unsigned fGsoType = 0;
1126
1127 if (pSG->cbTotal == 0)
1128 {
1129 LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
1130 return NULL;
1131 }
1132
1133 /** @todo We should use fragments mapping the SG buffers with large packets.
1134 * 256 bytes seems to be a threshold used a lot for this. It
1135 * requires some nasty work on the intnet side though... */
1136 /*
1137 * Allocate a packet and copy over the data.
1138 */
1139 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1140 pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
1141 if (RT_UNLIKELY(!pPkt))
1142 {
1143 Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
1144 pSG->pvUserData = NULL;
1145 return NULL;
1146 }
1147 pPkt->dev = pDev;
1148 pPkt->ip_summed = CHECKSUM_NONE;
1149
1150 /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
1151 skb_reserve(pPkt, NET_IP_ALIGN);
1152
1153 /* Copy the segments. */
1154 skb_put(pPkt, pSG->cbTotal);
1155 IntNetSgRead(pSG, pPkt->data);
1156
1157#if defined(VBOXNETFLT_WITH_GSO_XMIT_WIRE) || defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
1158 /*
1159 * Setup GSO if used by this packet.
1160 */
1161 switch ((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type)
1162 {
1163 default:
1164 AssertMsgFailed(("%u (%s)\n", pSG->GsoCtx.u8Type, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type) ));
1165 /* fall thru */
1166 case PDMNETWORKGSOTYPE_INVALID:
1167 fGsoType = 0;
1168 break;
1169 case PDMNETWORKGSOTYPE_IPV4_TCP:
1170 fGsoType = SKB_GSO_TCPV4;
1171 break;
1172 case PDMNETWORKGSOTYPE_IPV4_UDP:
1173 fGsoType = SKB_GSO_UDP;
1174 break;
1175 case PDMNETWORKGSOTYPE_IPV6_TCP:
1176 fGsoType = SKB_GSO_TCPV6;
1177 break;
1178 }
1179 if (fGsoType)
1180 {
1181 struct skb_shared_info *pShInfo = skb_shinfo(pPkt);
1182
1183 pShInfo->gso_type = fGsoType | SKB_GSO_DODGY;
1184 pShInfo->gso_size = pSG->GsoCtx.cbMaxSeg;
1185 pShInfo->gso_segs = PDMNetGsoCalcSegmentCount(&pSG->GsoCtx, pSG->cbTotal);
1186
1187 /*
1188 * We need to set checksum fields even if the packet goes to the host
1189 * directly as it may be immediately forwarded by IP layer @bugref{5020}.
1190 */
1191 Assert(skb_headlen(pPkt) >= pSG->GsoCtx.cbHdrs);
1192 pPkt->ip_summed = CHECKSUM_PARTIAL;
1193# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1194 pPkt->csum_start = skb_headroom(pPkt) + pSG->GsoCtx.offHdr2;
1195 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1196 pPkt->csum_offset = RT_OFFSETOF(RTNETTCP, th_sum);
1197 else
1198 pPkt->csum_offset = RT_OFFSETOF(RTNETUDP, uh_sum);
1199# else
1200 pPkt->h.raw = pPkt->data + pSG->GsoCtx.offHdr2;
1201 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1202 pPkt->csum = RT_OFFSETOF(RTNETTCP, th_sum);
1203 else
1204 pPkt->csum = RT_OFFSETOF(RTNETUDP, uh_sum);
1205# endif
1206 if (!fDstWire)
1207 PDMNetGsoPrepForDirectUse(&pSG->GsoCtx, pPkt->data, pSG->cbTotal, PDMNETCSUMTYPE_PSEUDO);
1208 }
1209#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE || VBOXNETFLT_WITH_GSO_XMIT_HOST */
1210
1211 /*
1212 * Finish up the socket buffer.
1213 */
1214 pPkt->protocol = eth_type_trans(pPkt, pDev);
1215 if (fDstWire)
1216 {
1217 VBOX_SKB_RESET_NETWORK_HDR(pPkt);
1218
1219 /* Restore ethernet header back. */
1220 skb_push(pPkt, ETH_HLEN); /** @todo VLAN: +4 if VLAN? */
1221 VBOX_SKB_RESET_MAC_HDR(pPkt);
1222 }
1223 VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
1224
1225 return pPkt;
1226}
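/*
 * Note (describing code outside this excerpt, based on the rest of the driver):
 * the sk_buff produced above is not transmitted by this function. The caller is
 * expected to hand it to the stack itself, conceptually
 *
 *     if (fDstWire)
 *         dev_queue_xmit(pPkt);    (out to the wire through the real NIC)
 *     else
 *         netif_rx(pPkt);          (up the host's protocol stack)
 *
 * which is what the OS-specific xmit entry point of this filter does.
 */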
1227
1228
1229/**
1230 * Initializes a SG list from an sk_buff.
1231 *
1232 *
1233 * @param pThis The instance.
1234 * @param pBuf The sk_buff.
1235 * @param pSG The SG.
1236 * @param cSegs The number of segments allocated for the SG.
1237 * This should match the number in the sk_buff exactly!
1238 * @param fSrc The source of the frame.
1239 * @param pGsoCtx Pointer to the GSO context if it's a GSO
1240 * internal network frame.
1241 * NULL if regular frame.
1242 */
1243DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG,
1244 unsigned cSegs, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1245{
1246 int i;
1247 NOREF(pThis);
1248
1249 Assert(!skb_shinfo(pBuf)->frag_list);
1250
1251 if (!pGsoCtx)
1252 IntNetSgInitTempSegs(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/);
1253 else
1254 IntNetSgInitTempSegsGso(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/, pGsoCtx);
1255
1256#ifdef VBOXNETFLT_SG_SUPPORT
1257 pSG->aSegs[0].cb = skb_headlen(pBuf);
1258 pSG->aSegs[0].pv = pBuf->data;
1259 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1260
1261 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
1262 {
1263 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
1264 pSG->aSegs[i+1].cb = pFrag->size;
1265 pSG->aSegs[i+1].pv = kmap(pFrag->page);
1266 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
1267 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
1268 }
1269 ++i;
1270
1271#else
1272 pSG->aSegs[0].cb = pBuf->len;
1273 pSG->aSegs[0].pv = pBuf->data;
1274 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1275 i = 1;
1276#endif
1277
1278 pSG->cSegsUsed = i;
1279
1280#ifdef PADD_RUNT_FRAMES_FROM_HOST
1281 /*
1282 * Add a trailer if the frame is too small.
1283 *
1284 * Since we're getting to the packet before it is framed, it has not yet been
1285 * padded to the minimum Ethernet frame size of 60 bytes (FCS not included).
1286 * The current solution is to add a segment pointing to an all-zero buffer and pray that works for all frames...
1287 */
1288 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
1289 {
1290 static uint8_t const s_abZero[128] = {0};
1291
1292 AssertReturnVoid(i < cSegs);
1293
1294 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
1295 pSG->aSegs[i].pv = (void *)&s_abZero[0];
1296 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
1297 pSG->cbTotal = 60;
1298 pSG->cSegsUsed++;
1299 Assert(i + 1 <= pSG->cSegsAlloc);
1300 }
1301#endif
1302
1303 Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
1304 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
1305 for (i = 0; i < pSG->cSegsUsed; i++)
1306 Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
1307 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
1308}
1309
1310/**
1311 * Packet handler; hooked up via struct packet_type.
1312 *
1313 * @returns 0 (the return value is ignored by the kernel).
1314 * @param pBuf The socket buffer.
1315 * @param pSkbDev The device the packet arrived on.
1316 * @param pPacketType The packet type registration (embedded in the
1317 * filter instance, see VBOX_FLT_PT_TO_INST).
1318 * @param pOrigDev The original device (only present on 2.6.14 and later kernels).
1319 */
1320#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
1321static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1322 struct net_device *pSkbDev,
1323 struct packet_type *pPacketType,
1324 struct net_device *pOrigDev)
1325#else
1326static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1327 struct net_device *pSkbDev,
1328 struct packet_type *pPacketType)
1329#endif
1330{
1331 PVBOXNETFLTINS pThis;
1332 struct net_device *pDev;
1333 LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
1334 pBuf, pSkbDev, pPacketType));
1335#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1336 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1337 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1338 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1339#else
1340 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1341 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1342#endif
1343 /*
1344 * Drop it immediately?
1345 */
1346 if (!pBuf)
1347 return 0;
1348
1349 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
1350 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1351 if (pThis->u.s.pDev != pSkbDev)
1352 {
1353 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
1354 return 0;
1355 }
1356
1357 Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
1358 if (vboxNetFltLinuxSkBufIsOur(pBuf))
1359 {
1360 Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
1361 dev_kfree_skb(pBuf);
1362 return 0;
1363 }
1364
1365#ifndef VBOXNETFLT_SG_SUPPORT
1366 {
1367 /*
1368 * Get rid of fragmented packets, they cause too much trouble.
1369 */
1370 struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
1371 kfree_skb(pBuf);
1372 if (!pCopy)
1373 {
1374 LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
1375 return 0;
1376 }
1377 pBuf = pCopy;
1378# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1379 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1380 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1381 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1382# else
1383 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1384 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1385# endif
1386 }
1387#endif
1388
1389#ifdef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1390 /* Forward it to the internal network. */
1391 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1392#else
1393 /* Add the packet to transmit queue and schedule the bottom half. */
1394 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
1395 schedule_work(&pThis->u.s.XmitTask);
1396 Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
1397 &pThis->u.s.XmitTask, pBuf));
1398#endif
1399
1400 /* It does not really matter what we return, it is ignored by the kernel. */
1401 return 0;
1402}
1403
1404/**
1405 * Calculate the number of INTNETSEG segments the socket buffer will need.
1406 *
1407 * @returns Segment count.
1408 * @param pBuf The socket buffer.
1409 */
1410DECLINLINE(unsigned) vboxNetFltLinuxCalcSGSegments(struct sk_buff *pBuf)
1411{
1412#ifdef VBOXNETFLT_SG_SUPPORT
1413 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
1414#else
1415 unsigned cSegs = 1;
1416#endif
1417#ifdef PADD_RUNT_FRAMES_FROM_HOST
1418 /* vboxNetFltLinuxSkBufToSG adds a padding segment if it's a runt. */
1419 if (pBuf->len < 60)
1420 cSegs++;
1421#endif
1422 return cSegs;
1423}
1424
1425/**
1426 * Destroy the intnet scatter / gather buffer created by
1427 * vboxNetFltLinuxSkBufToSG.
1428 */
1429static void vboxNetFltLinuxDestroySG(PINTNETSG pSG)
1430{
1431#ifdef VBOXNETFLT_SG_SUPPORT
1432 int i;
1433
1434 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
1435 {
1436 printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
1437 kunmap(pSG->aSegs[i+1].pv);
1438 }
1439#endif
1440 NOREF(pSG);
1441}
1442
1443#ifdef LOG_ENABLED
1444/**
1445 * Logging helper.
1446 */
1447static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
1448{
1449 uint8_t *pInt, *pExt;
1450 static int iPacketNo = 1;
1451 iPacketNo += iIncrement;
1452 if (fEgress)
1453 {
1454 pExt = pSG->aSegs[0].pv;
1455 pInt = pExt + 6;
1456 }
1457 else
1458 {
1459 pInt = pSG->aSegs[0].pv;
1460 pExt = pInt + 6;
1461 }
1462 Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
1463 " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
1464 pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
1465 fEgress ? "-->" : "<--", pszWhere,
1466 pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
1467 pSG->cbTotal, iPacketNo));
1468 Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
1469}
1470#else
1471# define vboxNetFltDumpPacket(a, b, c, d) do {} while (0)
1472#endif
1473
1474#ifdef VBOXNETFLT_WITH_GSO_RECV
1475
1476/**
1477 * Worker for vboxNetFltLinuxForwardToIntNet that checks if we can forward a
1478 * GSO socket buffer without having to segment it.
1479 *
1480 * @returns true on success, false if needs segmenting.
1481 * @param pThis The net filter instance.
1482 * @param pSkb The GSO socket buffer.
1483 * @param fSrc The source.
1484 * @param pGsoCtx Where to return the GSO context on success.
1485 */
1486static bool vboxNetFltLinuxCanForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc,
1487 PPDMNETWORKGSO pGsoCtx)
1488{
1489 PDMNETWORKGSOTYPE enmGsoType;
1490 uint16_t uEtherType;
1491 unsigned int cbTransport;
1492 unsigned int offTransport;
1493 unsigned int cbTransportHdr;
1494 unsigned uProtocol;
1495 union
1496 {
1497 RTNETIPV4 IPv4;
1498 RTNETIPV6 IPv6;
1499 RTNETTCP Tcp;
1500 uint8_t ab[40];
1501 uint16_t au16[40/2];
1502 uint32_t au32[40/4];
1503 } Buf;
1504
1505 /*
1506 * Check the GSO properties of the socket buffer and make sure it fits.
1507 */
1508 /** @todo Figure out how to handle SKB_GSO_TCP_ECN! */
1509 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCPV6 | SKB_GSO_TCPV4) ))
1510 {
1511 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_type=%#x\n", skb_shinfo(pSkb)->gso_type));
1512 return false;
1513 }
1514 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_size < 1
1515 || pSkb->len > VBOX_MAX_GSO_SIZE ))
1516 {
1517 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_size=%#x skb_len=%#x (max=%#x)\n", skb_shinfo(pSkb)->gso_size, pSkb->len, VBOX_MAX_GSO_SIZE));
1518 return false;
1519 }
1520 /*
1521 * It is possible to receive GSO packets from wire if GRO is enabled.
1522 */
1523 if (RT_UNLIKELY(fSrc & INTNETTRUNKDIR_WIRE))
1524 {
1525 Log5(("vboxNetFltLinuxCanForwardAsGso: fSrc=wire\n"));
1526#ifdef VBOXNETFLT_WITH_GRO
1527 /*
1528 * The packet came from the wire and the driver has already consumed the
1529 * MAC header. We need to restore it.
1530 */
1531 pSkb->mac_len = skb_network_header(pSkb) - skb_mac_header(pSkb);
1532 skb_push(pSkb, pSkb->mac_len);
1533 Log5(("vboxNetFltLinuxCanForwardAsGso: mac_len=%d data=%p mac_header=%p network_header=%p\n",
1534 pSkb->mac_len, pSkb->data, skb_mac_header(pSkb), skb_network_header(pSkb)));
1535#else /* !VBOXNETFLT_WITH_GRO */
1536 /* Older kernels didn't have GRO. */
1537 return false;
1538#endif /* !VBOXNETFLT_WITH_GRO */
1539 }
1540 else
1541 {
1542 /*
1543 * skb_gso_segment does the following. Do we need to do it as well?
1544 */
1545#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1546 skb_reset_mac_header(pSkb);
1547 pSkb->mac_len = pSkb->network_header - pSkb->mac_header;
1548#else
1549 pSkb->mac.raw = pSkb->data;
1550 pSkb->mac_len = pSkb->nh.raw - pSkb->data;
1551#endif
1552 }
1553
1554 /*
1555 * Switch on the ethertype.
1556 */
1557 uEtherType = pSkb->protocol;
1558 if ( uEtherType == RT_H2N_U16_C(RTNET_ETHERTYPE_VLAN)
1559 && pSkb->mac_len == sizeof(RTNETETHERHDR) + sizeof(uint32_t))
1560 {
1561 uint16_t const *puEtherType = skb_header_pointer(pSkb, sizeof(RTNETETHERHDR) + sizeof(uint16_t), sizeof(uint16_t), &Buf);
1562 if (puEtherType)
1563 uEtherType = *puEtherType;
1564 }
1565 switch (uEtherType)
1566 {
1567 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV4):
1568 {
1569 unsigned int cbHdr;
1570 PCRTNETIPV4 pIPv4 = (PCRTNETIPV4)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv4), &Buf);
1571 if (RT_UNLIKELY(!pIPv4))
1572 {
1573 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv4 hdr\n"));
1574 return false;
1575 }
1576
1577 cbHdr = pIPv4->ip_hl * 4;
1578 cbTransport = RT_N2H_U16(pIPv4->ip_len);
1579 if (RT_UNLIKELY( cbHdr < RTNETIPV4_MIN_LEN
1580 || cbHdr > cbTransport ))
1581 {
1582 Log5(("vboxNetFltLinuxCanForwardAsGso: invalid IPv4 lengths: ip_hl=%u ip_len=%u\n", pIPv4->ip_hl, RT_N2H_U16(pIPv4->ip_len)));
1583 return false;
1584 }
1585 cbTransport -= cbHdr;
1586 offTransport = pSkb->mac_len + cbHdr;
1587 uProtocol = pIPv4->ip_p;
1588 if (uProtocol == RTNETIPV4_PROT_TCP)
1589 enmGsoType = PDMNETWORKGSOTYPE_IPV4_TCP;
1590 else if (uProtocol == RTNETIPV4_PROT_UDP)
1591 enmGsoType = PDMNETWORKGSOTYPE_IPV4_UDP;
1592 else /** @todo IPv6: 4to6 tunneling */
1593 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1594 break;
1595 }
1596
1597 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV6):
1598 {
1599 PCRTNETIPV6 pIPv6 = (PCRTNETIPV6)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv6), &Buf);
1600 if (RT_UNLIKELY(!pIPv6))
1601 {
1602 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv6 hdr\n"));
1603 return false;
1604 }
1605
1606 cbTransport = RT_N2H_U16(pIPv6->ip6_plen);
1607 offTransport = pSkb->mac_len + sizeof(RTNETIPV6);
1608 uProtocol = pIPv6->ip6_nxt;
1609 /** @todo IPv6: Dig our way out of the other headers. */
1610 if (uProtocol == RTNETIPV4_PROT_TCP)
1611 enmGsoType = PDMNETWORKGSOTYPE_IPV6_TCP;
1612 else if (uProtocol == RTNETIPV4_PROT_UDP)
1613 enmGsoType = PDMNETWORKGSOTYPE_IPV6_UDP;
1614 else
1615 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1616 break;
1617 }
1618
1619 default:
1620 Log5(("vboxNetFltLinuxCanForwardAsGso: uEtherType=%#x\n", RT_H2N_U16(uEtherType)));
1621 return false;
1622 }
1623
1624 if (enmGsoType == PDMNETWORKGSOTYPE_INVALID)
1625 {
1626 Log5(("vboxNetFltLinuxCanForwardAsGso: Unsupported protocol %d\n", uProtocol));
1627 return false;
1628 }
1629
1630 if (RT_UNLIKELY( offTransport + cbTransport <= offTransport
1631 || offTransport + cbTransport > pSkb->len
1632 || cbTransport < (uProtocol == RTNETIPV4_PROT_TCP ? RTNETTCP_MIN_LEN : RTNETUDP_MIN_LEN)) )
1633 {
1634 Log5(("vboxNetFltLinuxCanForwardAsGso: Bad transport length; off=%#x + cb=%#x => %#x; skb_len=%#x (%s)\n",
1635 offTransport, cbTransport, offTransport + cbTransport, pSkb->len, PDMNetGsoTypeName(enmGsoType) ));
1636 return false;
1637 }
1638
1639 /*
1640 * Check the TCP/UDP bits.
1641 */
1642 if (uProtocol == RTNETIPV4_PROT_TCP)
1643 {
1644 PCRTNETTCP pTcp = (PCRTNETTCP)skb_header_pointer(pSkb, offTransport, sizeof(Buf.Tcp), &Buf);
1645 if (RT_UNLIKELY(!pTcp))
1646 {
1647 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access TCP hdr\n"));
1648 return false;
1649 }
1650
1651 cbTransportHdr = pTcp->th_off * 4;
1652 if (RT_UNLIKELY( cbTransportHdr < RTNETTCP_MIN_LEN
1653 || cbTransportHdr > cbTransport
1654 || offTransport + cbTransportHdr >= UINT8_MAX
1655 || offTransport + cbTransportHdr >= pSkb->len ))
1656 {
1657 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for TCP header; off=%#x cb=%#x skb_len=%#x\n", offTransport, cbTransportHdr, pSkb->len));
1658 return false;
1659 }
1660
1661 }
1662 else
1663 {
1664 Assert(uProtocol == RTNETIPV4_PROT_UDP);
1665 cbTransportHdr = sizeof(RTNETUDP);
1666 if (RT_UNLIKELY( offTransport + cbTransportHdr >= UINT8_MAX
1667 || offTransport + cbTransportHdr >= pSkb->len ))
1668 {
1669 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for UDP header; off=%#x skb_len=%#x\n", offTransport, pSkb->len));
1670 return false;
1671 }
1672 }
1673
1674 /*
1675 * We're good, init the GSO context.
1676 */
1677 pGsoCtx->u8Type = enmGsoType;
1678 pGsoCtx->cbHdrs = offTransport + cbTransportHdr;
1679 pGsoCtx->cbMaxSeg = skb_shinfo(pSkb)->gso_size;
1680 pGsoCtx->offHdr1 = pSkb->mac_len;
1681 pGsoCtx->offHdr2 = offTransport;
1682 pGsoCtx->au8Unused[0] = 0;
1683 pGsoCtx->au8Unused[1] = 0;
1684
1685 return true;
1686}
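/*
 * Illustration (not part of the driver): for a plain Ethernet + IPv4 + TCP GSO
 * skb with no IP or TCP options and a 1500 byte MTU, the context built above
 * would come out roughly as:
 *
 *   pGsoCtx->offHdr1  = 14   (pSkb->mac_len, start of the IPv4 header)
 *   pGsoCtx->offHdr2  = 34   (14 + 20, start of the TCP header)
 *   pGsoCtx->cbHdrs   = 54   (34 + 20, end of the TCP header)
 *   pGsoCtx->cbMaxSeg = 1460 (skb_shinfo(pSkb)->gso_size, i.e. the MSS)
 *
 * The exact numbers depend on the interface MTU and on any IP/TCP options.
 */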
1687
1688/**
1689 * Forward the socket buffer as a GSO internal network frame.
1690 *
1691 * @returns IPRT status code.
1692 * @param pThis The net filter instance.
1693 * @param pSkb The GSO socket buffer.
1694 * @param fSrc The source.
1695 * @param pGsoCtx The GSO context (filled in by vboxNetFltLinuxCanForwardAsGso).
1696 */
1697static int vboxNetFltLinuxForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1698{
1699 int rc;
1700 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pSkb);
1701 if (RT_LIKELY(cSegs <= MAX_SKB_FRAGS + 1))
1702 {
1703 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1704 if (RT_LIKELY(pSG))
1705 {
1706 vboxNetFltLinuxSkBufToSG(pThis, pSkb, pSG, cSegs, fSrc, pGsoCtx);
1707
1708 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1709 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1710
1711 vboxNetFltLinuxDestroySG(pSG);
1712 rc = VINF_SUCCESS;
1713 }
1714 else
1715 {
1716 Log(("VBoxNetFlt: Dropping the sk_buff (failure case).\n"));
1717 rc = VERR_NO_MEMORY;
1718 }
1719 }
1720 else
1721 {
1722 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1723 rc = VERR_INTERNAL_ERROR_3;
1724 }
1725
1726 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1727 dev_kfree_skb(pSkb);
1728 return rc;
1729}
1730
1731#endif /* VBOXNETFLT_WITH_GSO_RECV */
1732
1733/**
1734 * Worker for vboxNetFltLinuxForwardToIntNet.
1735 *
1736 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_INTERNAL_ERROR_3.
1737 * @param pThis The net filter instance.
1738 * @param pBuf The socket buffer.
1739 * @param fSrc The source.
1740 */
1741static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
1742{
1743 int rc;
1744 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pBuf);
1745 if (cSegs <= MAX_SKB_FRAGS + 1)
1746 {
1747 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1748 if (RT_LIKELY(pSG))
1749 {
1750 if (fSrc & INTNETTRUNKDIR_WIRE)
1751 {
1752 /*
1753 * The packet came from wire, ethernet header was removed by device driver.
1754 * Restore it.
1755 */
1756 skb_push(pBuf, ETH_HLEN);
1757 }
1758
1759 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc, NULL /*pGsoCtx*/);
1760
1761 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1762 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1763
1764 vboxNetFltLinuxDestroySG(pSG);
1765 rc = VINF_SUCCESS;
1766 }
1767 else
1768 {
1769 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
1770 rc = VERR_NO_MEMORY;
1771 }
1772 }
1773 else
1774 {
1775 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1776 rc = VERR_INTERNAL_ERROR_3;
1777 }
1778
1779 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1780 dev_kfree_skb(pBuf);
1781 return rc;
1782}
1783
1784/**
1785 * Forwards a socket buffer to the internal network, segmenting GSO frames when necessary.
1786 * @param pThis The net filter instance.
1787 * @param pBuf The socket buffer. This is consumed by this function.
 */
1788static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
1789{
1790 uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;
1791
1792#ifdef VBOXNETFLT_WITH_GSO
1793 if (skb_is_gso(pBuf))
1794 {
1795 PDMNETWORKGSO GsoCtx;
1796 Log3(("vboxNetFltLinuxForwardToIntNet: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x ip_summed=%d\n",
1797 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, pBuf->ip_summed));
1798# ifdef VBOXNETFLT_WITH_GSO_RECV
1799 if ( (skb_shinfo(pBuf)->gso_type & (SKB_GSO_UDP | SKB_GSO_TCPV6 | SKB_GSO_TCPV4))
1800 && vboxNetFltLinuxCanForwardAsGso(pThis, pBuf, fSrc, &GsoCtx) )
1801 vboxNetFltLinuxForwardAsGso(pThis, pBuf, fSrc, &GsoCtx);
1802 else
1803# endif
1804 {
1805 /* Need to segment the packet */
1806 struct sk_buff *pNext;
1807 struct sk_buff *pSegment = skb_gso_segment(pBuf, 0 /*supported features*/);
1808 if (IS_ERR(pSegment))
1809 {
1810 dev_kfree_skb(pBuf);
1811 LogRel(("VBoxNetFlt: Failed to segment a packet (%d).\n", PTR_ERR(pSegment)));
1812 return;
1813 }
1814
1815 for (; pSegment; pSegment = pNext)
1816 {
1817 Log3(("vboxNetFltLinuxForwardToIntNet: segment len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1818 pSegment->len, pSegment->data_len, pSegment->truesize, pSegment->next, skb_shinfo(pSegment)->nr_frags, skb_shinfo(pSegment)->gso_size, skb_shinfo(pSegment)->gso_segs, skb_shinfo(pSegment)->gso_type, skb_shinfo(pSegment)->frag_list, pSegment->pkt_type));
1819 pNext = pSegment->next;
1820 pSegment->next = 0;
1821 vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
1822 }
1823 dev_kfree_skb(pBuf);
1824 }
1825 }
1826 else
1827#endif /* VBOXNETFLT_WITH_GSO */
1828 {
1829 if (pBuf->ip_summed == CHECKSUM_PARTIAL && pBuf->pkt_type == PACKET_OUTGOING)
1830 {
1831#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1832 /*
1833 * Try to work around the problem with CentOS 4.7 and 5.2 (2.6.9
1834 * and 2.6.18 kernels): they pass a wrong 'h' pointer down. We take the IP
1835 * header length from the header itself and reconstruct the 'h' pointer
1836 * to the TCP (or whatever) header.
1837 */
1838 unsigned char *tmp = pBuf->h.raw;
1839 if (pBuf->h.raw == pBuf->nh.raw && pBuf->protocol == htons(ETH_P_IP))
1840 pBuf->h.raw = pBuf->nh.raw + pBuf->nh.iph->ihl * 4;
1841#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1842 if (VBOX_SKB_CHECKSUM_HELP(pBuf))
1843 {
1844 LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
1845 dev_kfree_skb(pBuf);
1846 return;
1847 }
1848#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1849 /* Restore the original (wrong) pointer. */
1850 pBuf->h.raw = tmp;
1851#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1852 }
1853 vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
1854 }
1855}
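/*
 * Informal summary of the paths through vboxNetFltLinuxForwardToIntNet above:
 *   1. GSO skb the internal network can take as-is: vboxNetFltLinuxForwardAsGso()
 *      (only with VBOXNETFLT_WITH_GSO_RECV and a supported gso_type).
 *   2. GSO skb that cannot be passed through: skb_gso_segment() splits it and
 *      each resulting segment goes through vboxNetFltLinuxForwardSegment().
 *   3. Non-GSO skb: the checksum is completed first if it is an outgoing
 *      CHECKSUM_PARTIAL packet, then vboxNetFltLinuxForwardSegment() is called.
 */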
1856
1857#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1858/**
1859 * Work queue handler that forwards the socket buffers queued by
1860 * vboxNetFltLinuxPacketHandler to the internal network.
1861 *
1862 * @param pWork The work structure (passed as an opaque pointer on pre-2.6.20 kernels).
1863 */
1864# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1865static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
1866# else
1867static void vboxNetFltLinuxXmitTask(void *pWork)
1868# endif
1869{
1870 PVBOXNETFLTINS pThis = VBOX_FLT_XT_TO_INST(pWork);
1871 struct sk_buff *pBuf;
1872
1873 Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
1874
1875 /*
1876 * Active? Retain the instance and increment the busy counter.
1877 */
1878 if (vboxNetFltTryRetainBusyActive(pThis))
1879 {
1880 while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != NULL)
1881 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1882
1883 vboxNetFltRelease(pThis, true /* fBusy */);
1884 }
1885 else
1886 {
1887 /** @todo Shouldn't we just drop the packets here? There is little point in
1888 * making them accumulate when the VM is paused and it'll only waste
1889 * kernel memory anyway... Hmm. maybe wait a short while (2-5 secs)
1890 * before starting to drain the packets (goes for the intnet ring buf
1891 * too)? */
1892 }
1893}
1894#endif /* !VBOXNETFLT_LINUX_NO_XMIT_QUEUE */
1895
1896/**
1897 * Reports the GSO capabilities of the hardware NIC.
1898 *
1899 * @param pThis The net filter instance. The caller holds a
1900 * reference to this.
1901 */
1902static void vboxNetFltLinuxReportNicGsoCapabilities(PVBOXNETFLTINS pThis)
1903{
1904#ifdef VBOXNETFLT_WITH_GSO_XMIT_WIRE
1905 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
1906 {
1907 struct net_device *pDev;
1908 PINTNETTRUNKSWPORT pSwitchPort;
1909 unsigned int fFeatures;
1910 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1911
1912 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1913
1914 pSwitchPort = pThis->pSwitchPort; /* this doesn't need to be here, but it does no harm. */
1915 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1916 if (pDev)
1917 fFeatures = pDev->features;
1918 else
1919 fFeatures = 0;
1920
1921 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1922
1923 if (pThis->pSwitchPort)
1924 {
1925 /* Set/update the GSO capabilities of the NIC. */
1926 uint32_t fGsoCapabilities = 0;
1927 if (fFeatures & NETIF_F_TSO)
1928 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP);
1929 if (fFeatures & NETIF_F_TSO6)
1930 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP);
1931# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
1932 if (fFeatures & NETIF_F_UFO)
1933 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP);
1934 if (fFeatures & NETIF_F_UFO)
1935 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP);
1936# endif
1937 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort, fGsoCapabilities, INTNETTRUNKDIR_WIRE);
1938 }
1939
1940 vboxNetFltRelease(pThis, true /*fBusy*/);
1941 }
1942#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE */
1943}
1944
1945/**
1946 * Helper that determines whether the host (ignoring us) is operating the
1947 * interface in promiscuous mode or not.
1948 */
1949static bool vboxNetFltLinuxPromiscuous(PVBOXNETFLTINS pThis)
1950{
1951 bool fRc = false;
1952 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
1953 if (pDev)
1954 {
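 /*
  * pDev->promiscuity counts every dev_set_promiscuity(+1) caller, ourselves
  * included. If we bumped it (fPromiscuousSet), subtract our own reference;
  * anything left means the host (or another tool) also has the interface in
  * promiscuous mode.
  */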
1955 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
1956 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
1957 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
1958 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1959 }
1960 return fRc;
1961}
1962
1963#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1964/**
1965 * Helper for detecting TAP devices.
1966 */
1967static bool vboxNetFltIsTapDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
1968{
1969 if (pDev->ethtool_ops && pDev->ethtool_ops->get_drvinfo)
1970 {
1971 struct ethtool_drvinfo Info;
1972
1973 memset(&Info, 0, sizeof(Info));
1974 Info.cmd = ETHTOOL_GDRVINFO;
1975 pDev->ethtool_ops->get_drvinfo(pDev, &Info);
1976 Log3(("vboxNetFltIsTapDevice: driver=%s version=%s bus_info=%s\n",
1977 Info.driver, Info.version, Info.bus_info));
1978
1979 return !strncmp(Info.driver, "tun", 4)
1980 && !strncmp(Info.bus_info, "tap", 4);
1981 }
1982
1983 return false;
1984}
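/*
 * For reference (a sketch based on the in-kernel tun driver, not something this
 * code checks beyond the two strncmp calls above): tun_get_drvinfo() reports
 * driver="tun" for both device modes and sets bus_info to "tap" or "tun"
 * depending on whether the device was created in TAP (ethernet) or TUN
 * (point-to-point IP) mode. An 'ethtool -i' style query on a tap device would
 * typically show something like:
 *
 *   driver: tun
 *   version: 1.6      (varies with the kernel)
 *   bus-info: tap
 */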
1985
1986/**
1987 * Helper for updating the link state of TAP devices.
1988 * Only TAP devices are affected.
1989 */
1990static void vboxNetFltSetTapLinkState(PVBOXNETFLTINS pThis, struct net_device *pDev, bool fLinkUp)
1991{
1992 if (vboxNetFltIsTapDevice(pThis, pDev))
1993 {
1994 Log3(("vboxNetFltSetTapLinkState: bringing %s tap device link state\n",
1995 fLinkUp ? "up" : "down"));
1996 netif_tx_lock_bh(pDev);
1997 if (fLinkUp)
1998 netif_carrier_on(pDev);
1999 else
2000 netif_carrier_off(pDev);
2001 netif_tx_unlock_bh(pDev);
2002 }
2003}
2004#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
2005DECLINLINE(void) vboxNetFltSetTapLinkState(PVBOXNETFLTINS pThis, struct net_device *pDev, bool fLinkUp)
2006{
2007 /* Nothing to do for pre-2.6.36 kernels. */
2008}
2009#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
2010
2011/**
2012 * Internal worker for vboxNetFltLinuxNotifierCallback.
2013 *
2014 * @returns VBox status code.
2015 * @param pThis The instance.
2016 * @param pDev The network device to attach to; this function retains
2017 * it (dev_hold) and stores it in the instance data.
2018 */
2019static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
2020{
2021 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2022 LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));
2023
2024 /*
2025 * Retain and store the device.
2026 */
2027 dev_hold(pDev);
2028
2029 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2030 ASMAtomicUoWritePtr(&pThis->u.s.pDev, pDev);
2031 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2032
2033 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n",
2034 pDev, pDev->name,
2035#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2036 netdev_refcnt_read(pDev)
2037#else
2038 atomic_read(&pDev->refcnt)
2039#endif
2040 ));
2041 Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2042 pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
2043
2044 /* Get the mac address while we still have a valid net_device reference. */
2045 memcpy(&pThis->u.s.MacAddr, pDev->dev_addr, sizeof(pThis->u.s.MacAddr));
2046
2047 /*
2048 * Install a packet filter for this device with a protocol wildcard (ETH_P_ALL).
2049 */
2050 pThis->u.s.PacketType.type = __constant_htons(ETH_P_ALL);
2051 pThis->u.s.PacketType.dev = pDev;
2052 pThis->u.s.PacketType.func = vboxNetFltLinuxPacketHandler;
2053 dev_add_pack(&pThis->u.s.PacketType);
2054
2055#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2056 vboxNetFltLinuxHookDev(pThis, pDev);
2057#endif
2058#ifdef VBOXNETFLT_WITH_QDISC
2059 vboxNetFltLinuxQdiscInstall(pThis, pDev);
2060#endif /* VBOXNETFLT_WITH_QDISC */
2061
2062 /*
2063 * When attaching to a TAP interface we need to bring its link state up
2064 * ourselves on 2.6.36 and later kernels.
2065 */
2066 vboxNetFltSetTapLinkState(pThis, pDev, true);
2067
2068 /*
2069 * Set indicators that require the spinlock. Be a bit paranoid about racing
2070 * the device notification handler.
2071 */
2072 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2073 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2074 if (pDev)
2075 {
2076 ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
2077 ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
2078 pDev = NULL; /* don't dereference it */
2079 }
2080 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2081 Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));
2082
2083 /*
2084 * If the above succeeded report GSO capabilities, if not undo and
2085 * release the device.
2086 */
2087 if (!pDev)
2088 {
2089 Assert(pThis->pSwitchPort);
2090 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
2091 {
2092 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2093 pThis->pSwitchPort->pfnReportMacAddress(pThis->pSwitchPort, &pThis->u.s.MacAddr);
2094 pThis->pSwitchPort->pfnReportPromiscuousMode(pThis->pSwitchPort, vboxNetFltLinuxPromiscuous(pThis));
2095 pThis->pSwitchPort->pfnReportNoPreemptDsts(pThis->pSwitchPort, INTNETTRUNKDIR_WIRE | INTNETTRUNKDIR_HOST);
2096 vboxNetFltRelease(pThis, true /*fBusy*/);
2097 }
2098 }
2099 else
2100 {
2101#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2102 vboxNetFltLinuxUnhookDev(pThis, pDev);
2103#endif
2104#ifdef VBOXNETFLT_WITH_QDISC
2105 vboxNetFltLinuxQdiscRemove(pThis, pDev);
2106#endif /* VBOXNETFLT_WITH_QDISC */
2107 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2108 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2109 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2110 dev_put(pDev);
2111 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n",
2112 pDev, pDev->name,
2113#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2114 netdev_refcnt_read(pDev)
2115#else
2116 atomic_read(&pDev->refcnt)
2117#endif
2118 ));
2119 }
2120
2121 LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.MacAddr), &pThis->u.s.MacAddr));
2122 return VINF_SUCCESS;
2123}
2124
2125
2126static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
2127{
2128 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2129
2130 Assert(!pThis->fDisconnectedFromHost);
2131
2132#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2133 vboxNetFltLinuxUnhookDev(pThis, pDev);
2134#endif
2135#ifdef VBOXNETFLT_WITH_QDISC
2136 vboxNetFltLinuxQdiscRemove(pThis, pDev);
2137#endif /* VBOXNETFLT_WITH_QDISC */
2138
2139 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2140 ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
2141 ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
2142 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2143 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2144
2145 dev_remove_pack(&pThis->u.s.PacketType);
2146#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2147 skb_queue_purge(&pThis->u.s.XmitQueue);
2148#endif
2149 Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2150 Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n",
2151 pDev, pDev->name,
2152#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2153 netdev_refcnt_read(pDev)
2154#else
2155 atomic_read(&pDev->refcnt)
2156#endif
2157 ));
2158 dev_put(pDev);
2159
2160 return NOTIFY_OK;
2161}
2162
2163static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
2164{
2165 /* Check if we are not suspended and promiscuous mode has not been set. */
2166 if ( pThis->enmTrunkState == INTNETTRUNKIFSTATE_ACTIVE
2167 && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2168 {
2169 /* Note that there is no need for locking as the kernel got hold of the lock already. */
2170 dev_set_promiscuity(pDev, 1);
2171 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
2172 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2173 }
2174 else
2175 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2176 return NOTIFY_OK;
2177}
2178
2179static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
2180{
2181 /* Undo promiscuous mode if we have set it. */
2182 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2183 {
2184 /* Note that there is no need for locking as the kernel got hold of the lock already. */
2185 dev_set_promiscuity(pDev, -1);
2186 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
2187 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2188 }
2189 else
2190 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2191 return NOTIFY_OK;
2192}
2193
2194#ifdef LOG_ENABLED
2195/** Stringify the NETDEV_XXX constants. */
2196static const char *vboxNetFltLinuxGetNetDevEventName(unsigned long ulEventType)
2197{
2198 const char *pszEvent = "NETDEV_<unknown>";
2199 switch (ulEventType)
2200 {
2201 case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
2202 case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
2203 case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
2204 case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
2205 case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
2206 case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
2207 case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
2208 case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
2209 case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
2210 case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
2211# ifdef NETDEV_FEAT_CHANGE
2212 case NETDEV_FEAT_CHANGE: pszEvent = "NETDEV_FEAT_CHANGE"; break;
2213# endif
2214 }
2215 return pszEvent;
2216}
2217#endif /* LOG_ENABLED */
2218
2219/**
2220 * Callback for listening to netdevice events.
2221 *
2222 * This handles rediscovery, clean-up on unregistration, promiscuity on
2223 * up/down, and GSO feature changes reported via ethtool.
2224 *
2225 * @returns NOTIFY_OK
2226 * @param self Pointer to our notifier registration block.
2227 * @param ulEventType The event.
2228 * @param ptr Event specific, but it is usually the device it
2229 * relates to.
2230 */
2231static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
2232
2233{
2234 PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);
2235 struct net_device *pDev = (struct net_device *)ptr;
2236 int rc = NOTIFY_OK;
2237
2238 Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2239 vboxNetFltLinuxGetNetDevEventName(ulEventType), ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
2240 if ( ulEventType == NETDEV_REGISTER
2241 && !strcmp(pDev->name, pThis->szName))
2242 {
2243 vboxNetFltLinuxAttachToInterface(pThis, pDev);
2244 }
2245 else
2246 {
2247 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2248 if (pDev == ptr)
2249 {
2250 switch (ulEventType)
2251 {
2252 case NETDEV_UNREGISTER:
2253 rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
2254 break;
2255 case NETDEV_UP:
2256 rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
2257 break;
2258 case NETDEV_GOING_DOWN:
2259 rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
2260 break;
2261 case NETDEV_CHANGENAME:
2262 break;
2263#ifdef NETDEV_FEAT_CHANGE
2264 case NETDEV_FEAT_CHANGE:
2265 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2266 break;
2267#endif
2268 }
2269 }
2270 }
2271
2272 return rc;
2273}
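/*
 * Illustration (typical sequence, not exhaustive): if the host admin takes the
 * trunk interface down and up again ('ip link set eth0 down' / '... up'), the
 * callback above is invoked roughly as follows:
 *
 *   NETDEV_GOING_DOWN -> vboxNetFltLinuxDeviceGoingDown()  (drop our promiscuous ref)
 *   NETDEV_DOWN       -> ignored (no case in the switch)
 *   NETDEV_UP         -> vboxNetFltLinuxDeviceIsUp()       (re-enable promiscuous mode)
 *
 * NETDEV_UNREGISTER (e.g. the NIC driver being unloaded) triggers the full
 * detach in vboxNetFltLinuxUnregisterDevice() instead.
 */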
2274
2275bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
2276{
2277 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
2278}
2279
2280int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, void *pvIfData, PINTNETSG pSG, uint32_t fDst)
2281{
2282 struct net_device * pDev;
2283 int err;
2284 int rc = VINF_SUCCESS;
2285 NOREF(pvIfData);
2286
2287 LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));
2288
2289 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2290 if (pDev)
2291 {
2292 /*
2293 * Create a sk_buff for the gather list and push it onto the wire.
2294 */
2295 if (fDst & INTNETTRUNKDIR_WIRE)
2296 {
2297 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
2298 if (pBuf)
2299 {
2300 vboxNetFltDumpPacket(pSG, true, "wire", 1);
2301 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2302 Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
2303 err = dev_queue_xmit(pBuf);
2304 if (err)
2305 rc = RTErrConvertFromErrno(err);
2306 }
2307 else
2308 rc = VERR_NO_MEMORY;
2309 }
2310
2311 /*
2312 * Create a sk_buff for the gather list and push it onto the host stack.
2313 */
2314 if (fDst & INTNETTRUNKDIR_HOST)
2315 {
2316 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
2317 if (pBuf)
2318 {
2319 vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
2320 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2321 Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
2322 err = netif_rx_ni(pBuf);
2323 if (err)
2324 rc = RTErrConvertFromErrno(err);
2325 }
2326 else
2327 rc = VERR_NO_MEMORY;
2328 }
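 /*
  * Note: both dev_queue_xmit() and netif_rx_ni() consume the sk_buff whether
  * they succeed or not, which is why there is no dev_kfree_skb() on the error
  * paths above.
  */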
2329
2330 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2331 }
2332
2333 return rc;
2334}
2335
2336
2337void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
2338{
2339 struct net_device * pDev;
2340
2341 LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
2342 pThis, pThis->szName, fActive?"true":"false",
2343 pThis->fDisablePromiscuous?"true":"false"));
2344
2345 if (pThis->fDisablePromiscuous)
2346 return;
2347
2348 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2349 if (pDev)
2350 {
2351 /*
2352 * This API is a bit weird; the best reference is the code.
2353 *
2354 * Also, there is a bit of a race wrt the maintenance of the host
2355 * interface's promiscuity for vboxNetFltPortOsIsPromiscuous.
2356 */
2357#ifdef LOG_ENABLED
2358 u_int16_t fIf;
2359 unsigned const cPromiscBefore = pDev->promiscuity;
2360#endif
2361 if (fActive)
2362 {
2363 Assert(!pThis->u.s.fPromiscuousSet);
2364
2365 rtnl_lock();
2366 dev_set_promiscuity(pDev, 1);
2367 rtnl_unlock();
2368 pThis->u.s.fPromiscuousSet = true;
2369 Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2370 }
2371 else
2372 {
2373 if (pThis->u.s.fPromiscuousSet)
2374 {
2375 rtnl_lock();
2376 dev_set_promiscuity(pDev, -1);
2377 rtnl_unlock();
2378 Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2379 }
2380 pThis->u.s.fPromiscuousSet = false;
2381
2382#ifdef LOG_ENABLED
2383 fIf = dev_get_flags(pDev);
2384 Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, pDev->promiscuity));
2385#endif
2386 }
2387
2388 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2389 }
2390}
2391
2392
2393int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
2394{
2395#ifdef VBOXNETFLT_WITH_QDISC
2396 vboxNetFltLinuxQdiscRemove(pThis, NULL);
2397#endif /* VBOXNETFLT_WITH_QDISC */
2398 /*
2399 * Remove the packet handler when we get disconnected from the internal
2400 * switch, as we don't want the handler to forward packets to a disconnected switch.
2401 */
2402 dev_remove_pack(&pThis->u.s.PacketType);
2403 return VINF_SUCCESS;
2404}
2405
2406
2407int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
2408{
2409 /*
2410 * Report the GSO capabilities of the host and device (if connected).
2411 * Note! No need to mark ourselves busy here.
2412 */
2413 /** @todo duplicate work here now? Attach */
2414#if defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
2415 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort,
2416 0
2417 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP)
2418 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP)
2419# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
2420 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP)
2421 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP)
2422# endif
2423 , INTNETTRUNKDIR_HOST);
2424
2425#endif
2426 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2427
2428 return VINF_SUCCESS;
2429}
2430
2431
2432void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
2433{
2434 struct net_device *pDev;
2435 bool fRegistered;
2436 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2437
2438#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2439 vboxNetFltLinuxUnhookDev(pThis, NULL);
2440#endif
2441
2442 /** @todo This code may race vboxNetFltLinuxUnregisterDevice (very very
2443 * unlikely, but none the less). Since it doesn't actually update the
2444 * state (just reads it), it is likely to panic in some interesting
2445 * ways. */
2446
2447 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2448 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2449 fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
2450 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2451
2452 if (fRegistered)
2453 {
2454 vboxNetFltSetTapLinkState(pThis, pDev, false);
2455
2456#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2457 skb_queue_purge(&pThis->u.s.XmitQueue);
2458#endif
2459 Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2460 Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n",
2461 pDev, pDev->name,
2462#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2463 netdev_refcnt_read(pDev)
2464#else
2465 atomic_read(&pDev->refcnt)
2466#endif
2467 ));
2468 dev_put(pDev);
2469 }
2470 Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
2471 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2472 module_put(THIS_MODULE);
2473}
2474
2475
2476int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
2477{
2478 int err;
2479 NOREF(pvContext);
2480
2481 pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
2482 err = register_netdevice_notifier(&pThis->u.s.Notifier);
2483 if (err)
2484 return VERR_INTNET_FLT_IF_FAILED;
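 /*
  * Note: register_netdevice_notifier() replays NETDEV_REGISTER (and NETDEV_UP)
  * for interfaces that already exist, so if the trunk interface is present
  * vboxNetFltLinuxAttachToInterface() has already run by the time we get here
  * and fRegistered reflects the outcome.
  */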
2485 if (!pThis->u.s.fRegistered)
2486 {
2487 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2488 LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
2489 return VERR_INTNET_FLT_IF_NOT_FOUND;
2490 }
2491
2492 Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
2493 if ( pThis->fDisconnectedFromHost
2494 || !try_module_get(THIS_MODULE))
2495 return VERR_INTNET_FLT_IF_FAILED;
2496
2497 return VINF_SUCCESS;
2498}
2499
2500int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
2501{
2502 /*
2503 * Init the linux specific members.
2504 */
2505 pThis->u.s.pDev = NULL;
2506 pThis->u.s.fRegistered = false;
2507 pThis->u.s.fPromiscuousSet = false;
2508 memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
2509#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2510 skb_queue_head_init(&pThis->u.s.XmitQueue);
2511# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
2512 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
2513# else
2514 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
2515# endif
2516#endif
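 /*
  * Note: the two INIT_WORK forms above reflect the 2.6.20 work queue API
  * change; older kernels passed an opaque data pointer to the handler while
  * newer ones hand it the work_struct itself, which is also why
  * vboxNetFltLinuxXmitTask() has two prototypes.
  */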
2517
2518 return VINF_SUCCESS;
2519}
2520
2521
2522void vboxNetFltPortOsNotifyMacAddress(PVBOXNETFLTINS pThis, void *pvIfData, PCRTMAC pMac)
2523{
2524 NOREF(pThis); NOREF(pvIfData); NOREF(pMac);
2525}
2526
2527
2528int vboxNetFltPortOsConnectInterface(PVBOXNETFLTINS pThis, void *pvIf, void **pvIfData)
2529{
2530 /* Nothing to do */
2531 NOREF(pThis); NOREF(pvIf); NOREF(pvIfData);
2532 return VINF_SUCCESS;
2533}
2534
2535
2536int vboxNetFltPortOsDisconnectInterface(PVBOXNETFLTINS pThis, void *pvIfData)
2537{
2538 /* Nothing to do */
2539 NOREF(pThis); NOREF(pvIfData);
2540 return VINF_SUCCESS;
2541}
2542