VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@ 35499

Last change on this file since 35499 was 35382, checked in by vboxsync, 14 years ago

vboxnetflt: fix for host panic when bridging to devices with no TX queue

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 85.7 KB
Line 
1/* $Id: VBoxNetFlt-linux.c 35382 2010-12-30 16:17:50Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19*   Header Files                                                               *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
22#define VBOXNETFLT_LINUX_NO_XMIT_QUEUE
23#include "the-linux-kernel.h"
24#include "version-generated.h"
25#include "product-generated.h"
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/rtnetlink.h>
29#include <linux/miscdevice.h>
30#include <linux/ip.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <VBox/intnetinline.h>
35#include <VBox/vmm/pdmnetinline.h>
36#include <VBox/param.h>
37#include <iprt/alloca.h>
38#include <iprt/assert.h>
39#include <iprt/spinlock.h>
40#include <iprt/semaphore.h>
41#include <iprt/initterm.h>
42#include <iprt/process.h>
43#include <iprt/mem.h>
44#include <iprt/net.h>
45#include <iprt/log.h>
46#include <iprt/mp.h>
47#include <iprt/mem.h>
48#include <iprt/time.h>
49
50#define VBOXNETFLT_OS_SPECFIC 1
51#include "../VBoxNetFltInternal.h"
52
53#ifdef CONFIG_NET_SCHED
54# define VBOXNETFLT_WITH_QDISC /* Comment this out to disable qdisc support */
55# ifdef VBOXNETFLT_WITH_QDISC
56# include <net/pkt_sched.h>
57# endif /* VBOXNETFLT_WITH_QDISC */
58#endif
59
60
61/*******************************************************************************
62*   Defined Constants And Macros                                               *
63*******************************************************************************/
64#define VBOX_FLT_NB_TO_INST(pNB) RT_FROM_MEMBER(pNB, VBOXNETFLTINS, u.s.Notifier)
65#define VBOX_FLT_PT_TO_INST(pPT) RT_FROM_MEMBER(pPT, VBOXNETFLTINS, u.s.PacketType)
66#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
67# define VBOX_FLT_XT_TO_INST(pXT) RT_FROM_MEMBER(pXT, VBOXNETFLTINS, u.s.XmitTask)
68#endif
69
70#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
71# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
72# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
73#else
74# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
75# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
76#endif
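/* Note: 2.6.22 replaced the old skb->nh/skb->mac pointer unions with the
 * offset-based skb_reset_network_header()/skb_reset_mac_header() accessors.
 * Both variants above mark the current skb->data position as the start of the
 * respective header, e.g. right after skb_push(pPkt, ETH_HLEN) has restored
 * the Ethernet header in vboxNetFltLinuxSkBufFromSG(). */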
77
78#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
79# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
80#else
81# define CHECKSUM_PARTIAL CHECKSUM_HW
82# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
83# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
84# else
85# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
86# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
87# else
88# define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
89# endif
90/* Versions prior to 2.6.10 use stats for both bstats and qstats */
91# define bstats stats
92# define qstats stats
93# endif
94#endif
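/* skb_checksum_help() changed its signature several times: 2.6.19+ takes just
 * the skb, 2.6.10-2.6.18 add a direction argument, 2.6.7-2.6.9 take a pointer
 * to the skb pointer, and older kernels use an inverted return value.
 * VBOX_SKB_CHECKSUM_HELP() hides all of this behind a single convention:
 * zero on success. */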
95
96#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13)
97static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
98{
99 kfree_skb(skb);
100 sch->stats.drops++;
101
102 return NET_XMIT_DROP;
103}
104#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) */
105
106#ifndef NET_IP_ALIGN
107# define NET_IP_ALIGN 2
108#endif
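/* NET_IP_ALIGN is the padding reserved in front of the 14-byte Ethernet header
 * so that the IP header behind it lands on a 16-byte boundary (2 + 14 = 16);
 * kernels predating the define get the conventional value of 2 here. */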
109
110#if 0
111/** Create scatter / gather segments for fragments. When not used, we will
112 * linearize the socket buffer before creating the internal networking SG. */
113# define VBOXNETFLT_SG_SUPPORT 1
114#endif
115
116#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
117/** Indicates that the Linux kernel may send us GSO frames. */
118# define VBOXNETFLT_WITH_GSO 1
119
120/** This enables or disables the transmitting of GSO frames from the internal
121 * network to the host. */
122# define VBOXNETFLT_WITH_GSO_XMIT_HOST 1
123
124# if 0 /** @todo This is currently disabled because it causes a performance loss of 5-10%. */
125/** This enables or disables the transmitting of GSO frames from the internal
126 * network to the wire. */
127# define VBOXNETFLT_WITH_GSO_XMIT_WIRE 1
128# endif
129
130/** This enables or disables the forwarding/flooding of GSO frames from the host
131 * to the internal network. */
132# define VBOXNETFLT_WITH_GSO_RECV 1
133
134#endif
135
136#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
137/** This enables or disables handling of GSO frames coming from the wire (GRO). */
138# define VBOXNETFLT_WITH_GRO 1
139#endif
140/*
141 * GRO support was backported to RHEL 5.4
142 */
143#ifdef RHEL_RELEASE_CODE
144# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 4)
145# define VBOXNETFLT_WITH_GRO 1
146# endif
147#endif
148
149/*******************************************************************************
150*   Internal Functions                                                         *
151*******************************************************************************/
152static int VBoxNetFltLinuxInit(void);
153static void VBoxNetFltLinuxUnload(void);
154static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf);
155
156
157/*******************************************************************************
158*   Global Variables                                                           *
159*******************************************************************************/
160/**
161 * The (common) global data.
162 */
163static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
164
165module_init(VBoxNetFltLinuxInit);
166module_exit(VBoxNetFltLinuxUnload);
167
168MODULE_AUTHOR(VBOX_VENDOR);
169MODULE_DESCRIPTION(VBOX_PRODUCT " Network Filter Driver");
170MODULE_LICENSE("GPL");
171#ifdef MODULE_VERSION
172MODULE_VERSION(VBOX_VERSION_STRING " (" RT_XSTR(INTNETTRUNKIFPORT_VERSION) ")");
173#endif
174
175
176#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) && defined(LOG_ENABLED)
177unsigned dev_get_flags(const struct net_device *dev)
178{
179 unsigned flags;
180
181 flags = (dev->flags & ~(IFF_PROMISC |
182 IFF_ALLMULTI |
183 IFF_RUNNING)) |
184 (dev->gflags & (IFF_PROMISC |
185 IFF_ALLMULTI));
186
187 if (netif_running(dev) && netif_carrier_ok(dev))
188 flags |= IFF_RUNNING;
189
190 return flags;
191}
192#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
193
194
195#ifdef VBOXNETFLT_WITH_QDISC
196//#define QDISC_LOG(x) printk x
197# define QDISC_LOG(x) do { } while (0)
198
199# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
200# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops)
201# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
202# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, ops, parent)
203# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
204# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(dev, queue, ops, parent)
205# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) */
206# define QDISC_CREATE(dev, queue, ops, parent) qdisc_create_dflt(queue, ops, parent)
207# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) */
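/* qdisc_create_dflt() changed shape over time: 2.6.20 added the parent handle,
 * 2.6.27 added the TX queue, and 2.6.37 dropped the device argument;
 * QDISC_CREATE() gives the rest of this file a single calling convention. */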
208
209# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
210# define qdisc_dev(qdisc) (qdisc->dev)
211# define qdisc_pkt_len(skb) (skb->len)
212# define QDISC_GET(dev) (dev->qdisc_sleeping)
213# else
214# define QDISC_GET(dev) (netdev_get_tx_queue(dev, 0)->qdisc_sleeping)
215# endif
216
217# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
218# define QDISC_SAVED_NUM(dev) 1
219# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
220# define QDISC_SAVED_NUM(dev) dev->num_tx_queues
221# else
222# define QDISC_SAVED_NUM(dev) dev->num_tx_queues+1
223# endif
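/* The extra slot on 2.6.32+ exists because those kernels keep a root qdisc
 * pointer in struct net_device itself (dev->qdisc) in addition to the
 * per-TX-queue pointers; vboxNetFltLinuxQdiscInstall() saves and restores it
 * from ppSaved[num_tx_queues]. */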
224
225# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
226# define QDISC_IS_BUSY(dev, qdisc) test_bit(__LINK_STATE_SCHED, &dev->state)
227# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
228# define QDISC_IS_BUSY(dev, qdisc) (test_bit(__QDISC_STATE_RUNNING, &qdisc->state) || \
229 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
230# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */
231# define QDISC_IS_BUSY(dev, qdisc) (qdisc_is_running(qdisc) || \
232 test_bit(__QDISC_STATE_SCHED, &qdisc->state))
233# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */
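/* QDISC_IS_BUSY() reports whether the qdisc is currently running or scheduled
 * to run; vboxNetFltLinuxQdiscRemove() spins on it (yielding the CPU) after
 * swapping the saved qdiscs back in, so that no CPU is still executing our
 * qdisc when the last reference is destroyed. */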
234
235struct VBoxNetQDiscPriv
236{
237 /** Pointer to the single child qdisc. */
238 struct Qdisc *pChild;
239 /*
240 * Technically it is possible to have different qdiscs for different TX
241 * queues so we have to save them all.
242 */
243 /** Pointer to the array of saved qdiscs. */
244 struct Qdisc **ppSaved;
245 /** Pointer to the net filter instance. */
246 PVBOXNETFLTINS pVBoxNetFlt;
247};
248typedef struct VBoxNetQDiscPriv *PVBOXNETQDISCPRIV;
249
250//#define VBOXNETFLT_QDISC_ENQUEUE
251static int vboxNetFltQdiscEnqueue(struct sk_buff *skb, struct Qdisc *sch)
252{
253 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
254 int rc;
255
256# ifdef VBOXNETFLT_QDISC_ENQUEUE
257 if (VALID_PTR(pPriv->pVBoxNetFlt))
258 {
259 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
260 PCRTNETETHERHDR pEtherHdr;
261 PINTNETTRUNKSWPORT pSwitchPort;
262 uint32_t cbHdrs = skb_headlen(skb);
263
264 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
265 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(skb, 0, cbHdrs, &abHdrBuf[0]);
266 if ( pEtherHdr
267 && (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) != NULL
268 && VALID_PTR(pSwitchPort)
269 && cbHdrs >= 6)
270 {
271 /** @todo consider reference counting, etc. */
272 INTNETSWDECISION enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
273 if (enmDecision == INTNETSWDECISION_INTNET)
274 {
275 struct sk_buff *pBuf = skb_copy(skb, GFP_ATOMIC);
if (pBuf) /* skb_copy() may fail under memory pressure; fall back to the normal enqueue path then */
{
276 pBuf->pkt_type = PACKET_OUTGOING;
277 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
278 qdisc_drop(skb, sch);
279 ++sch->bstats.packets;
280 sch->bstats.bytes += qdisc_pkt_len(skb);
281 return NET_XMIT_SUCCESS;
}
282 }
283 }
284 }
285# endif /* VBOXNETFLT_QDISC_ENQUEUE */
286 rc = pPriv->pChild->enqueue(skb, pPriv->pChild);
287 if (rc == NET_XMIT_SUCCESS)
288 {
289 ++sch->q.qlen;
290 ++sch->bstats.packets;
291 sch->bstats.bytes += qdisc_pkt_len(skb);
292 }
293 else
294 ++sch->qstats.drops;
295 return rc;
296}
297
298static struct sk_buff *vboxNetFltQdiscDequeue(struct Qdisc *sch)
299{
300 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
301# ifdef VBOXNETFLT_QDISC_ENQUEUE
302 --sch->q.qlen;
303 return pPriv->pChild->dequeue(pPriv->pChild);
304# else /* VBOXNETFLT_QDISC_ENQUEUE */
305 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
306 PCRTNETETHERHDR pEtherHdr;
307 PINTNETTRUNKSWPORT pSwitchPort;
308 struct sk_buff *pSkb;
309
310 QDISC_LOG(("vboxNetFltDequeue: Enter pThis=%p\n", pPriv->pVBoxNetFlt));
311
312 while ((pSkb = pPriv->pChild->dequeue(pPriv->pChild)) != NULL)
313 {
314 struct sk_buff *pBuf;
315 INTNETSWDECISION enmDecision;
316 uint32_t cbHdrs;
317
318 --sch->q.qlen;
319
320 if (!VALID_PTR(pPriv->pVBoxNetFlt))
321 break;
322
323 cbHdrs = skb_headlen(pSkb);
324 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
325 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
326 if ( !pEtherHdr
327 || (pSwitchPort = pPriv->pVBoxNetFlt->pSwitchPort) == NULL
328 || !VALID_PTR(pSwitchPort)
329 || cbHdrs < 6)
330 break;
331
332 /** @todo consider reference counting, etc. */
333 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
334 if (enmDecision != INTNETSWDECISION_INTNET)
335 break;
336
337 pBuf = skb_copy(pSkb, GFP_ATOMIC);
if (!pBuf) /* skb_copy() may fail under memory pressure; deliver the packet normally then */
break;
338 pBuf->pkt_type = PACKET_OUTGOING;
339 QDISC_LOG(("vboxNetFltDequeue: pThis=%p\n", pPriv->pVBoxNetFlt));
340 vboxNetFltLinuxForwardToIntNet(pPriv->pVBoxNetFlt, pBuf);
341 qdisc_drop(pSkb, sch);
342 QDISC_LOG(("VBoxNetFlt: Packet for %02x:%02x:%02x:%02x:%02x:%02x dropped\n",
343 pSkb->data[0], pSkb->data[1], pSkb->data[2],
344 pSkb->data[3], pSkb->data[4], pSkb->data[5]));
345 }
346
347 return pSkb;
348# endif /* VBOXNETFLT_QDISC_ENQUEUE */
349}
350
351# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
352static int vboxNetFltQdiscRequeue(struct sk_buff *skb, struct Qdisc *sch)
353{
354 int rc;
355 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
356
357 rc = pPriv->pChild->ops->requeue(skb, pPriv->pChild);
358 if (rc == 0)
359 {
360 sch->q.qlen++;
361# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
362 sch->qstats.requeues++;
363# endif
364 }
365
366 return rc;
367}
368# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) */
369
370static unsigned int vboxNetFltQdiscDrop(struct Qdisc *sch)
371{
372 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
373 unsigned int cbLen;
374
375 if (pPriv->pChild->ops->drop)
376 {
377 cbLen = pPriv->pChild->ops->drop(pPriv->pChild);
378 if (cbLen != 0)
379 {
380 ++sch->qstats.drops;
381 --sch->q.qlen;
382 return cbLen;
383 }
384 }
385
386 return 0;
387}
388
389# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
390static int vboxNetFltQdiscInit(struct Qdisc *sch, struct rtattr *opt)
391# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
392static int vboxNetFltQdiscInit(struct Qdisc *sch, struct nlattr *opt)
393# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
394{
395 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
396 struct net_device *pDev = qdisc_dev(sch);
397
398 pPriv->pVBoxNetFlt = NULL;
399
400 pPriv->ppSaved = kcalloc(QDISC_SAVED_NUM(pDev), sizeof(pPriv->ppSaved[0]),
401 GFP_KERNEL);
402 if (!pPriv->ppSaved)
403 return -ENOMEM;
404
405 pPriv->pChild = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
406 &pfifo_qdisc_ops,
407 TC_H_MAKE(TC_H_MAJ(sch->handle),
408 TC_H_MIN(1)));
409 if (!pPriv->pChild)
410 {
411 kfree(pPriv->ppSaved);
412 pPriv->ppSaved = NULL;
413 return -ENOMEM;
414 }
415
416 return 0;
417}
418
419static void vboxNetFltQdiscReset(struct Qdisc *sch)
420{
421 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
422
423 qdisc_reset(pPriv->pChild);
424 sch->q.qlen = 0;
425 sch->qstats.backlog = 0;
426}
427
428static void vboxNetFltQdiscDestroy(struct Qdisc* sch)
429{
430 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
431 struct net_device *pDev = qdisc_dev(sch);
432
433 qdisc_destroy(pPriv->pChild);
434 pPriv->pChild = NULL;
435
436 if (pPriv->ppSaved)
437 {
438 int i;
439 for (i = 0; i < QDISC_SAVED_NUM(pDev); i++)
440 if (pPriv->ppSaved[i])
441 qdisc_destroy(pPriv->ppSaved[i]);
442 kfree(pPriv->ppSaved);
443 pPriv->ppSaved = NULL;
444 }
445}
446
447static int vboxNetFltClassGraft(struct Qdisc *sch, unsigned long arg, struct Qdisc *pNew,
448 struct Qdisc **ppOld)
449{
450 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
451
452 if (pNew == NULL)
453 pNew = &noop_qdisc;
454
455 sch_tree_lock(sch);
456 *ppOld = pPriv->pChild;
457 pPriv->pChild = pNew;
458# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
459 sch->q.qlen = 0;
460# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
461 qdisc_tree_decrease_qlen(*ppOld, (*ppOld)->q.qlen);
462# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */
463 qdisc_reset(*ppOld);
464 sch_tree_unlock(sch);
465
466 return 0;
467}
468
469static struct Qdisc *vboxNetFltClassLeaf(struct Qdisc *sch, unsigned long arg)
470{
471 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
472 return pPriv->pChild;
473}
474
475static unsigned long vboxNetFltClassGet(struct Qdisc *sch, u32 classid)
476{
477 return 1;
478}
479
480static void vboxNetFltClassPut(struct Qdisc *sch, unsigned long arg)
481{
482}
483
484# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
485static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
486 struct rtattr **tca, unsigned long *arg)
487# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
488static int vboxNetFltClassChange(struct Qdisc *sch, u32 classid, u32 parentid,
489 struct nlattr **tca, unsigned long *arg)
490# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
491{
492 return -ENOSYS;
493}
494
495static int vboxNetFltClassDelete(struct Qdisc *sch, unsigned long arg)
496{
497 return -ENOSYS;
498}
499
500static void vboxNetFltClassWalk(struct Qdisc *sch, struct qdisc_walker *walker)
501{
502 if (!walker->stop) {
503 if (walker->count >= walker->skip)
504 if (walker->fn(sch, 1, walker) < 0) {
505 walker->stop = 1;
506 return;
507 }
508 walker->count++;
509 }
510}
511
512# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
513static struct tcf_proto **vboxNetFltClassFindTcf(struct Qdisc *sch, unsigned long cl)
514{
515 return NULL;
516}
517# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) */
518
519static int vboxNetFltClassDump(struct Qdisc *sch, unsigned long cl,
520 struct sk_buff *skb, struct tcmsg *tcm)
521{
522 PVBOXNETQDISCPRIV pPriv = qdisc_priv(sch);
523
524 if (cl != 1)
525 return -ENOENT;
526
527 tcm->tcm_handle |= TC_H_MIN(1);
528 tcm->tcm_info = pPriv->pChild->handle;
529
530 return 0;
531}
532
533
534static struct Qdisc_class_ops g_VBoxNetFltClassOps =
535{
536 .graft = vboxNetFltClassGraft,
537 .leaf = vboxNetFltClassLeaf,
538 .get = vboxNetFltClassGet,
539 .put = vboxNetFltClassPut,
540 .change = vboxNetFltClassChange,
541 .delete = vboxNetFltClassDelete,
542 .walk = vboxNetFltClassWalk,
543# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
544 .tcf_chain = vboxNetFltClassFindTcf,
545# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) */
546 .dump = vboxNetFltClassDump,
547};
548
549
550static struct Qdisc_ops g_VBoxNetFltQDiscOps = {
551 .cl_ops = &g_VBoxNetFltClassOps,
552 .id = "vboxnetflt",
553 .priv_size = sizeof(struct VBoxNetQDiscPriv),
554 .enqueue = vboxNetFltQdiscEnqueue,
555 .dequeue = vboxNetFltQdiscDequeue,
556# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
557 .requeue = vboxNetFltQdiscRequeue,
558# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
559 .peek = qdisc_peek_dequeued,
560# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
561 .drop = vboxNetFltQdiscDrop,
562 .init = vboxNetFltQdiscInit,
563 .reset = vboxNetFltQdiscReset,
564 .destroy = vboxNetFltQdiscDestroy,
565 .owner = THIS_MODULE
566};
567
568/*
569 * If our qdisc is already attached to the device (that means the user
570 * installed it from command line with 'tc' command) we simply update
571 * the pointer to vboxnetflt instance in qdisc's private structure.
572 * Otherwise we need to take some additional steps:
573 * - Create our qdisc;
574 * - Save all references to qdiscs;
575 * - Replace our child with the first qdisc reference;
576 * - Replace all references so they point to our qdisc.
577 */
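/* For reference, the manual installation mentioned above would be done from
 * user space roughly like this (a sketch only; 'eth0' is a placeholder
 * device name):
 *     tc qdisc add dev eth0 root vboxnetflt
 * in which case only the pVBoxNetFlt pointer in the private structure is
 * updated below. */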
578static void vboxNetFltLinuxQdiscInstall(PVBOXNETFLTINS pThis, struct net_device *pDev)
579{
580# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
581 int i;
582# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
583 PVBOXNETQDISCPRIV pPriv;
584
585 struct Qdisc *pExisting = QDISC_GET(pDev);
586 /* Do not install our qdisc for devices with no TX queues */
587 if (!pExisting->enqueue)
588 return;
589 if (strcmp(pExisting->ops->id, "vboxnetflt"))
590 {
591 /* The existing qdisc is different from ours, let's create new one. */
592 struct Qdisc *pNew = QDISC_CREATE(pDev, netdev_get_tx_queue(pDev, 0),
593 &g_VBoxNetFltQDiscOps, TC_H_ROOT);
594 if (!pNew)
595 return; // TODO: Error?
596
597 if (!try_module_get(THIS_MODULE))
598 {
599 /*
600 * This may cause a memory leak but calling qdisc_destroy()
601 * is not an option as it will call module_put().
602 */
603 return;
604 }
605 pPriv = qdisc_priv(pNew);
606
607 qdisc_destroy(pPriv->pChild);
608 pPriv->pChild = QDISC_GET(pDev);
609 atomic_inc(&pPriv->pChild->refcnt);
610 /*
611 * There is no need in deactivating the device or acquiring any locks
612 * prior changing qdiscs since we do not destroy the old qdisc.
613 * Atomic replacement of pointers is enough.
614 */
615 /*
616 * No need to change reference counters here as we merely move
617 * the pointer and the reference counter of the newly allocated
618 * qdisc is already 1.
619 */
620# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
621 pPriv->ppSaved[0] = pDev->qdisc_sleeping;
622 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pNew);
623 ASMAtomicWritePtr(&pDev->qdisc, pNew);
624# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
625 for (i = 0; i < pDev->num_tx_queues; i++)
626 {
627 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
628
629 pPriv->ppSaved[i] = pQueue->qdisc_sleeping;
630 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pNew);
631 ASMAtomicWritePtr(&pQueue->qdisc, pNew);
632 if (i)
633 atomic_inc(&pNew->refcnt);
634 }
635 /* Newer kernels store root qdisc in netdev structure as well. */
636# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
637 pPriv->ppSaved[pDev->num_tx_queues] = pDev->qdisc;
638 ASMAtomicWritePtr(&pDev->qdisc, pNew);
639 atomic_inc(&pNew->refcnt);
640# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
641# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
642 /* Sync the queue len with our child */
643 pNew->q.qlen = pPriv->pChild->q.qlen;
644 }
645 else
646 {
647 /* We already have vboxnetflt qdisc, let's use it. */
648 pPriv = qdisc_priv(pExisting);
649 }
650 ASMAtomicWritePtr(&pPriv->pVBoxNetFlt, pThis);
651 QDISC_LOG(("vboxNetFltLinuxInstallQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
652}
653
654static void vboxNetFltLinuxQdiscRemove(PVBOXNETFLTINS pThis, struct net_device *pDev)
655{
656# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
657 int i;
658# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
659 PVBOXNETQDISCPRIV pPriv;
660 struct Qdisc *pQdisc, *pChild;
661 if (!pDev)
662 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
663 if (!VALID_PTR(pDev))
664 {
665 printk("VBoxNetFlt: Failed to detach qdisc, invalid device pointer: %p\n",
666 pDev);
667 return; // TODO: Consider returning an error
668 }
669
670
671 pQdisc = QDISC_GET(pDev);
672 if (strcmp(pQdisc->ops->id, "vboxnetflt"))
673 {
674 if (pQdisc->enqueue)
675 {
676 /* Looks like the user has replaced our qdisc manually. */
677 printk("VBoxNetFlt: Failed to detach qdisc, wrong qdisc: %s\n",
678 pQdisc->ops->id);
679 }
680 return; // TODO: Consider returning an error
681 }
682
683 pPriv = qdisc_priv(pQdisc);
684 Assert(pPriv->pVBoxNetFlt == pThis);
685 ASMAtomicWriteNullPtr(&pPriv->pVBoxNetFlt);
686 pChild = ASMAtomicXchgPtrT(&pPriv->pChild, &noop_qdisc, struct Qdisc *);
687 qdisc_destroy(pChild); /* It won't be the last reference. */
688
689 QDISC_LOG(("vboxNetFltLinuxQdiscRemove: refcnt=%d num_tx_queues=%d\n",
690 atomic_read(&pQdisc->refcnt), pDev->num_tx_queues));
691# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
692 /* Play it safe, make sure the qdisc is not being used. */
693 if (pPriv->ppSaved[0])
694 {
695 ASMAtomicWritePtr(&pDev->qdisc_sleeping, pPriv->ppSaved[0]);
696 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[0]);
697 pPriv->ppSaved[0] = NULL;
698 while (QDISC_IS_BUSY(pDev, pQdisc))
699 yield();
700 qdisc_destroy(pQdisc); /* Destroy reference */
701 }
702# else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
703 for (i = 0; i < pDev->num_tx_queues; i++)
704 {
705 struct netdev_queue *pQueue = netdev_get_tx_queue(pDev, i);
706 if (pPriv->ppSaved[i])
707 {
708 Assert(pQueue->qdisc_sleeping == pQdisc);
709 ASMAtomicWritePtr(&pQueue->qdisc_sleeping, pPriv->ppSaved[i]);
710 ASMAtomicWritePtr(&pQueue->qdisc, pPriv->ppSaved[i]);
711 pPriv->ppSaved[i] = NULL;
712 while (QDISC_IS_BUSY(pDev, pQdisc))
713 yield();
714 qdisc_destroy(pQdisc); /* Destroy reference */
715 }
716 }
717 /* Newer kernels store root qdisc in netdev structure as well. */
718# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
719 ASMAtomicWritePtr(&pDev->qdisc, pPriv->ppSaved[pDev->num_tx_queues]);
720 pPriv->ppSaved[pDev->num_tx_queues] = NULL;
721 while (QDISC_IS_BUSY(pDev, pQdisc))
722 yield();
723 qdisc_destroy(pQdisc); /* Destroy reference */
724# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
725# endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
726
727 /*
728 * At this point all references to our qdisc should be gone
729 * unless the user had installed it manually.
730 */
731 QDISC_LOG(("vboxNetFltLinuxRemoveQdisc: pThis=%p\n", pPriv->pVBoxNetFlt));
732}
733
734#endif /* VBOXNETFLT_WITH_QDISC */
735
736
737/**
738 * Initialize module.
739 *
740 * @returns appropriate status code.
741 */
742static int __init VBoxNetFltLinuxInit(void)
743{
744 int rc;
745 /*
746 * Initialize IPRT.
747 */
748 rc = RTR0Init(0);
749 if (RT_SUCCESS(rc))
750 {
751 Log(("VBoxNetFltLinuxInit\n"));
752
753 /*
754 * Initialize the globals and connect to the support driver.
755 *
756 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
757 * for establishing the connection to the support driver.
758 */
759 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
760 rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
761 if (RT_SUCCESS(rc))
762 {
763#ifdef VBOXNETFLT_WITH_QDISC
764 /*memcpy(&g_VBoxNetFltQDiscOps, &pfifo_qdisc_ops, sizeof(g_VBoxNetFltQDiscOps));
765 strcpy(g_VBoxNetFltQDiscOps.id, "vboxnetflt");
766 g_VBoxNetFltQDiscOps.owner = THIS_MODULE;*/
767 rc = register_qdisc(&g_VBoxNetFltQDiscOps);
768 if (rc)
769 {
770 LogRel(("VBoxNetFlt: Failed to register qdisc: %d\n", rc));
771 return rc;
772 }
773#endif /* VBOXNETFLT_WITH_QDISC */
774 LogRel(("VBoxNetFlt: Successfully started.\n"));
775 return 0;
776 }
777
778 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
779 RTR0Term();
780 }
781 else
782 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
783
784 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
785 return -RTErrConvertToErrno(rc);
786}
787
788
789/**
790 * Unload the module.
791 *
792 * @todo We have to prevent this if we're busy!
793 */
794static void __exit VBoxNetFltLinuxUnload(void)
795{
796 int rc;
797 Log(("VBoxNetFltLinuxUnload\n"));
798 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
799
800#ifdef VBOXNETFLT_WITH_QDISC
801 unregister_qdisc(&g_VBoxNetFltQDiscOps);
802#endif /* VBOXNETFLT_WITH_QDISC */
803 /*
804 * Undo the work done during start (in reverse order).
805 */
806 rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
807 AssertRC(rc); NOREF(rc);
808
809 RTR0Term();
810
811 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
812
813 Log(("VBoxNetFltLinuxUnload - done\n"));
814}
815
816
817/**
818 * Experiment where we filter traffic from the host to the internal network
819 * before it reaches the NIC driver.
820 *
821 * The current code uses a very ugly hack and only works on kernels using
822 * net_device_ops (>= 2.6.29). It has been shown to give us a
823 * performance boost of 60-100% though. So, we have to find some less hacky way
824 * of getting this job done eventually.
825 *
826 * #define VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
827 */
828#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
829
830/**
831 * The overridden net_device_ops of the device we're attached to.
832 *
833 * Requires Linux 2.6.29 or later.
834 *
835 * This is a very dirty hack that was created to explore how much we can improve
836 * the host to guest transfers by not CC'ing the NIC.
837 */
838typedef struct VBoxNetDeviceOpsOverride
839{
840 /** Our overridden ops. */
841 struct net_device_ops Ops;
842 /** Magic word. */
843 uint32_t u32Magic;
844 /** Pointer to the original ops. */
845 struct net_device_ops const *pOrgOps;
846 /** Pointer to the net filter instance. */
847 PVBOXNETFLTINS pVBoxNetFlt;
848 /** The number of filtered packets. */
849 uint64_t cFiltered;
850 /** The total number of packets. */
851 uint64_t cTotal;
852} VBOXNETDEVICEOPSOVERRIDE, *PVBOXNETDEVICEOPSOVERRIDE;
853/** VBOXNETDEVICEOPSOVERRIDE::u32Magic value. */
854#define VBOXNETDEVICEOPSOVERRIDE_MAGIC UINT32_C(0x00c0ffee)
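/* The layout trick: the overridden net_device_ops is the first member of the
 * structure, so the kernel keeps calling through pDev->netdev_ops as usual,
 * while vboxNetFltLinuxStartXmitFilter() can cast netdev_ops back to
 * PVBOXNETDEVICEOPSOVERRIDE to reach the magic, the original ops and the
 * filter instance. */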
855
856/**
857 * ndo_start_xmit wrapper that drops packets that shouldn't go to the wire
858 * because they belong on the internal network.
859 *
860 * @returns NETDEV_TX_XXX.
861 * @param pSkb The socket buffer to transmit.
862 * @param pDev The net device.
863 */
864static int vboxNetFltLinuxStartXmitFilter(struct sk_buff *pSkb, struct net_device *pDev)
865{
866 PVBOXNETDEVICEOPSOVERRIDE pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->netdev_ops;
867 uint8_t abHdrBuf[sizeof(RTNETETHERHDR) + sizeof(uint32_t) + RTNETIPV4_MIN_LEN];
868 PCRTNETETHERHDR pEtherHdr;
869 PINTNETTRUNKSWPORT pSwitchPort;
870 uint32_t cbHdrs;
871
872
873 /*
874 * Validate the override structure.
875 *
876 * Note! We're racing vboxNetFltLinuxUnhookDev here. If this was supposed
877 * to be production quality code, we would have to be much more
878 * careful here and avoid the race.
879 */
880 if ( !VALID_PTR(pOverride)
881 || pOverride->u32Magic != VBOXNETDEVICEOPSOVERRIDE_MAGIC
882 || !VALID_PTR(pOverride->pOrgOps))
883 {
884 printk("vboxNetFltLinuxStartXmitFilter: bad override %p\n", pOverride);
885 dev_kfree_skb(pSkb);
886 return NETDEV_TX_OK;
887 }
888 pOverride->cTotal++;
889
890 /*
891 * Do the filtering based on the default OUI of our virtual NICs.
892 *
893 * Note! In a real solution, we would ask the switch whether the
894 * destination MAC is certain to be on the internal network and then
895 * drop it.
896 */
897 cbHdrs = skb_headlen(pSkb);
898 cbHdrs = RT_MIN(cbHdrs, sizeof(abHdrBuf));
899 pEtherHdr = (PCRTNETETHERHDR)skb_header_pointer(pSkb, 0, cbHdrs, &abHdrBuf[0]);
900 if ( pEtherHdr
901 && VALID_PTR(pOverride->pVBoxNetFlt)
902 && (pSwitchPort = pOverride->pVBoxNetFlt->pSwitchPort) != NULL
903 && VALID_PTR(pSwitchPort)
904 && cbHdrs >= 6)
905 {
906 INTNETSWDECISION enmDecision;
907
908 /** @todo consider reference counting, etc. */
909 enmDecision = pSwitchPort->pfnPreRecv(pSwitchPort, pEtherHdr, cbHdrs, INTNETTRUNKDIR_HOST);
910 if (enmDecision == INTNETSWDECISION_INTNET)
911 {
912 dev_kfree_skb(pSkb);
913 pOverride->cFiltered++;
914 return NETDEV_TX_OK;
915 }
916 }
917
918 return pOverride->pOrgOps->ndo_start_xmit(pSkb, pDev);
919}
920
921/**
922 * Hooks the device ndo_start_xmit operation of the device.
923 *
924 * @param pThis The net filter instance.
925 * @param pDev The net device.
926 */
927static void vboxNetFltLinuxHookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
928{
929 PVBOXNETDEVICEOPSOVERRIDE pOverride;
930 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
931
932 pOverride = RTMemAlloc(sizeof(*pOverride));
933 if (!pOverride)
934 return;
935 pOverride->pOrgOps = pDev->netdev_ops;
936 pOverride->Ops = *pDev->netdev_ops;
937 pOverride->Ops.ndo_start_xmit = vboxNetFltLinuxStartXmitFilter;
938 pOverride->u32Magic = VBOXNETDEVICEOPSOVERRIDE_MAGIC;
939 pOverride->cTotal = 0;
940 pOverride->cFiltered = 0;
941 pOverride->pVBoxNetFlt = pThis;
942
943 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp); /* (this isn't necessary, but so what) */
944 ASMAtomicWritePtr((void * volatile *)&pDev->netdev_ops, pOverride);
945 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
946}
947
948/**
949 * Undoes what vboxNetFltLinuxHookDev did.
950 *
951 * @param pThis The net filter instance.
952 * @param pDev The net device. Can be NULL, in which case
953 * we'll try retrieve it from @a pThis.
954 */
955static void vboxNetFltLinuxUnhookDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
956{
957 PVBOXNETDEVICEOPSOVERRIDE pOverride;
958 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
959
960 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
961 if (!pDev)
962 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
963 if (VALID_PTR(pDev))
964 {
965 pOverride = (PVBOXNETDEVICEOPSOVERRIDE)pDev->netdev_ops;
966 if ( VALID_PTR(pOverride)
967 && pOverride->u32Magic == VBOXNETDEVICEOPSOVERRIDE_MAGIC
968 && VALID_PTR(pOverride->pOrgOps)
969 )
970 {
971 ASMAtomicWritePtr((void * volatile *)&pDev->netdev_ops, pOverride->pOrgOps);
972 ASMAtomicWriteU32(&pOverride->u32Magic, 0);
973 }
974 else
975 pOverride = NULL;
976 }
977 else
978 pOverride = NULL;
979 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
980
981 if (pOverride)
982 {
983 printk("vboxnetflt: dropped %llu out of %llu packets\n", pOverride->cFiltered, pOverride->cTotal);
984 RTMemFree(pOverride);
985 }
986}
987
988#endif /* VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT */
989
990
991/**
992 * Reads and retains the host interface handle.
993 *
994 * @returns The handle, NULL if detached.
995 * @param pThis The instance.
996 */
997DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
998{
999#if 0
1000 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1001 struct net_device *pDev = NULL;
1002
1003 Log(("vboxNetFltLinuxRetainNetDev\n"));
1004 /*
1005 * Be careful here to avoid problems racing the detached callback.
1006 */
1007 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1008 if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
1009 {
1010 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
1011 if (pDev)
1012 {
1013 dev_hold(pDev);
1014 Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n",
1015 pDev, pDev->name,
1016#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
1017 netdev_refcnt_read(pDev)
1018#else
1019 atomic_read(&pDev->refcnt)
1020#endif
1021 ));
1022 }
1023 }
1024 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1025
1026 Log(("vboxNetFltLinuxRetainNetDev - done\n"));
1027 return pDev;
1028#else
1029 return ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1030#endif
1031}
1032
1033
1034/**
1035 * Release the host interface handle previously retained
1036 * by vboxNetFltLinuxRetainNetDev.
1037 *
1038 * @param pThis The instance.
1039 * @param pDev The vboxNetFltLinuxRetainNetDev
1040 * return value, NULL is fine.
1041 */
1042DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
1043{
1044#if 0
1045 Log(("vboxNetFltLinuxReleaseNetDev\n"));
1046 NOREF(pThis);
1047 if (pDev)
1048 {
1049 dev_put(pDev);
1050 Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n",
1051 pDev, pDev->name,
1052#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
1053 netdev_refcnt_read(pDev)
1054#else
1055 atomic_read(&pDev->refcnt)
1056#endif
1057 ));
1058 }
1059 Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
1060#endif
1061}
1062
1063#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
1064#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))
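/* Tagging sketch: vboxNetFltLinuxSkBufFromSG() stamps every buffer it injects
 * with
 *     VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
 * so that when the buffer is looped back into vboxNetFltLinuxPacketHandler(),
 * vboxNetFltLinuxSkBufIsOur() recognises it and it is dropped instead of
 * being forwarded to the internal network a second time. */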
1065
1066/**
1067 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
1068 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
1069 *
1070 * @returns true / false accordingly.
1071 * @param pBuf The sk_buff.
1072 */
1073DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
1074{
1075 return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
1076}
1077
1078
1079/**
1080 * Internal worker that creates a Linux sk_buff for a
1081 * (scatter/)gather list.
1082 *
1083 * @returns Pointer to the sk_buff.
1084 * @param pThis The instance.
1085 * @param pSG The (scatter/)gather list.
1086 * @param fDstWire Set if the destination is the wire.
1087 */
1088static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
1089{
1090 struct sk_buff *pPkt;
1091 struct net_device *pDev;
1092 unsigned fGsoType = 0;
1093
1094 if (pSG->cbTotal == 0)
1095 {
1096 LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
1097 return NULL;
1098 }
1099
1100 /** @todo We should use fragments mapping the SG buffers with large packets.
1101 * 256 bytes seems to be a commonly used threshold for this. It
1102 * requires some nasty work on the intnet side though... */
1103 /*
1104 * Allocate a packet and copy over the data.
1105 */
1106 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1107 pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
1108 if (RT_UNLIKELY(!pPkt))
1109 {
1110 Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
1111 pSG->pvUserData = NULL;
1112 return NULL;
1113 }
1114 pPkt->dev = pDev;
1115 pPkt->ip_summed = CHECKSUM_NONE;
1116
1117 /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
1118 skb_reserve(pPkt, NET_IP_ALIGN);
1119
1120 /* Copy the segments. */
1121 skb_put(pPkt, pSG->cbTotal);
1122 IntNetSgRead(pSG, pPkt->data);
1123
1124#if defined(VBOXNETFLT_WITH_GSO_XMIT_WIRE) || defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
1125 /*
1126 * Setup GSO if used by this packet.
1127 */
1128 switch ((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type)
1129 {
1130 default:
1131 AssertMsgFailed(("%u (%s)\n", pSG->GsoCtx.u8Type, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pSG->GsoCtx.u8Type) ));
1132 /* fall thru */
1133 case PDMNETWORKGSOTYPE_INVALID:
1134 fGsoType = 0;
1135 break;
1136 case PDMNETWORKGSOTYPE_IPV4_TCP:
1137 fGsoType = SKB_GSO_TCPV4;
1138 break;
1139 case PDMNETWORKGSOTYPE_IPV4_UDP:
1140 fGsoType = SKB_GSO_UDP;
1141 break;
1142 case PDMNETWORKGSOTYPE_IPV6_TCP:
1143 fGsoType = SKB_GSO_TCPV6;
1144 break;
1145 }
1146 if (fGsoType)
1147 {
1148 struct skb_shared_info *pShInfo = skb_shinfo(pPkt);
1149
1150 pShInfo->gso_type = fGsoType | SKB_GSO_DODGY;
1151 pShInfo->gso_size = pSG->GsoCtx.cbMaxSeg;
1152 pShInfo->gso_segs = PDMNetGsoCalcSegmentCount(&pSG->GsoCtx, pSG->cbTotal);
1153
1154 /*
1155 * We need to set checksum fields even if the packet goes to the host
1156 * directly as it may be immediately forwarded by the IP layer @bugref{5020}.
1157 */
1158 Assert(skb_headlen(pPkt) >= pSG->GsoCtx.cbHdrs);
1159 pPkt->ip_summed = CHECKSUM_PARTIAL;
1160# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1161 pPkt->csum_start = skb_headroom(pPkt) + pSG->GsoCtx.offHdr2;
1162 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1163 pPkt->csum_offset = RT_OFFSETOF(RTNETTCP, th_sum);
1164 else
1165 pPkt->csum_offset = RT_OFFSETOF(RTNETUDP, uh_sum);
1166# else
1167 pPkt->h.raw = pPkt->data + pSG->GsoCtx.offHdr2;
1168 if (fGsoType & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
1169 pPkt->csum = RT_OFFSETOF(RTNETTCP, th_sum);
1170 else
1171 pPkt->csum = RT_OFFSETOF(RTNETUDP, uh_sum);
1172# endif
1173 if (!fDstWire)
1174 PDMNetGsoPrepForDirectUse(&pSG->GsoCtx, pPkt->data, pSG->cbTotal, PDMNETCSUMTYPE_PSEUDO);
1175 }
1176#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE || VBOXNETFLT_WITH_GSO_XMIT_HOST */
1177
1178 /*
1179 * Finish up the socket buffer.
1180 */
1181 pPkt->protocol = eth_type_trans(pPkt, pDev);
1182 if (fDstWire)
1183 {
1184 VBOX_SKB_RESET_NETWORK_HDR(pPkt);
1185
1186 /* Restore ethernet header back. */
1187 skb_push(pPkt, ETH_HLEN); /** @todo VLAN: +4 if VLAN? */
1188 VBOX_SKB_RESET_MAC_HDR(pPkt);
1189 }
1190 VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
1191
1192 return pPkt;
1193}
1194
1195
1196/**
1197 * Initializes a SG list from an sk_buff.
1198 *
1199 * @returns Number of segments.
1200 * @param pThis The instance.
1201 * @param pBuf The sk_buff.
1202 * @param pSG The SG.
1204 * @param cSegs The number of segments allocated for the SG.
1205 * This should match the number in the sk_buff exactly!
1206 * @param fSrc The source of the frame.
1207 * @param pGso Pointer to the GSO context if it's a GSO
1208 * internal network frame. NULL if regular frame.
1209 */
1210DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG,
1211 unsigned cSegs, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1212{
1213 int i;
1214 NOREF(pThis);
1215
1216 Assert(!skb_shinfo(pBuf)->frag_list);
1217
1218 if (!pGsoCtx)
1219 IntNetSgInitTempSegs(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/);
1220 else
1221 IntNetSgInitTempSegsGso(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/, pGsoCtx);
1222
1223#ifdef VBOXNETFLT_SG_SUPPORT
1224 pSG->aSegs[0].cb = skb_headlen(pBuf);
1225 pSG->aSegs[0].pv = pBuf->data;
1226 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1227
1228 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
1229 {
1230 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
1231 pSG->aSegs[i+1].cb = pFrag->size;
1232 pSG->aSegs[i+1].pv = kmap(pFrag->page);
1233 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
1234 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
1235 }
1236 ++i;
1237
1238#else
1239 pSG->aSegs[0].cb = pBuf->len;
1240 pSG->aSegs[0].pv = pBuf->data;
1241 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
1242 i = 1;
1243#endif
1244
1245 pSG->cSegsUsed = i;
1246
1247#ifdef PADD_RUNT_FRAMES_FROM_HOST
1248 /*
1249 * Add a trailer if the frame is too small.
1250 *
1251 * Since we're getting to the packet before it is framed, it has not
1252 * yet been padded. The current solution is to add a segment pointing
1253 * to a buffer containing all zeros and pray that works for all frames...
1254 */
1255 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
1256 {
1257 static uint8_t const s_abZero[128] = {0};
1258
1259 AssertReturnVoid(i < cSegs);
1260
1261 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
1262 pSG->aSegs[i].pv = (void *)&s_abZero[0];
1263 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
1264 pSG->cbTotal = 60;
1265 pSG->cSegsUsed++;
1266 Assert(i + 1 <= pSG->cSegsAlloc);
1267 }
1268#endif
1269
1270 Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
1271 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
1272 for (i = 0; i < pSG->cSegsUsed; i++)
1273 Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
1274 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
1275}
1276
1277/**
1278 * Packet handler; called by the kernel for each packet seen on the attached interface.
1279 *
1280 * @returns 0 (the return value is ignored by the kernel).
1281 * @param pBuf The socket buffer.
1282 * @param pSkbDev The device the packet arrived on.
1283 * @param pPacketType The packet type registration (used to locate our instance).
1284 * @param pOrigDev The original device (2.6.14 and later only).
1286 */
1287#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
1288static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1289 struct net_device *pSkbDev,
1290 struct packet_type *pPacketType,
1291 struct net_device *pOrigDev)
1292#else
1293static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
1294 struct net_device *pSkbDev,
1295 struct packet_type *pPacketType)
1296#endif
1297{
1298 PVBOXNETFLTINS pThis;
1299 struct net_device *pDev;
1300 LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
1301 pBuf, pSkbDev, pPacketType));
1302#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1303 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1304 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1305 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1306#else
1307 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1308 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1309#endif
1310 /*
1311 * Drop it immediately?
1312 */
1313 if (!pBuf)
1314 return 0;
1315
1316 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
1317 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1318 if (pThis->u.s.pDev != pSkbDev)
1319 {
1320 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
1321 return 0;
1322 }
1323
1324 Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
1325 if (vboxNetFltLinuxSkBufIsOur(pBuf))
1326 {
1327 Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
1328 dev_kfree_skb(pBuf);
1329 return 0;
1330 }
1331
1332#ifndef VBOXNETFLT_SG_SUPPORT
1333 {
1334 /*
1335 * Get rid of fragmented packets, they cause too much trouble.
1336 */
1337 struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
1338 kfree_skb(pBuf);
1339 if (!pCopy)
1340 {
1341 LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
1342 return 0;
1343 }
1344 pBuf = pCopy;
1345# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
1346 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1347 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1348 Log4(("vboxNetFltLinuxPacketHandler: packet dump follows:\n%.*Rhxd\n", pBuf->len-pBuf->data_len, skb_mac_header(pBuf)));
1349# else
1350 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
1351 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
1352# endif
1353 }
1354#endif
1355
1356#ifdef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1357 /* Forward it to the internal network. */
1358 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1359#else
1360 /* Add the packet to transmit queue and schedule the bottom half. */
1361 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
1362 schedule_work(&pThis->u.s.XmitTask);
1363 Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
1364 &pThis->u.s.XmitTask, pBuf));
1365#endif
1366
1367 /* It does not really matter what we return, it is ignored by the kernel. */
1368 return 0;
1369}
1370
1371/**
1372 * Calculate the number of INTNETSEG segments the socket buffer will need.
1373 *
1374 * @returns Segment count.
1375 * @param pBuf The socket buffer.
1376 */
1377DECLINLINE(unsigned) vboxNetFltLinuxCalcSGSegments(struct sk_buff *pBuf)
1378{
1379#ifdef VBOXNETFLT_SG_SUPPORT
1380 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
1381#else
1382 unsigned cSegs = 1;
1383#endif
1384#ifdef PADD_RUNT_FRAMES_FROM_HOST
1385 /* vboxNetFltLinuxSkBufToSG adds a padding segment if it's a runt. */
1386 if (pBuf->len < 60)
1387 cSegs++;
1388#endif
1389 return cSegs;
1390}
1391
1392/**
1393 * Destroy the intnet scatter / gather buffer created by
1394 * vboxNetFltLinuxSkBufToSG.
1395 */
1396static void vboxNetFltLinuxDestroySG(PINTNETSG pSG)
1397{
1398#ifdef VBOXNETFLT_SG_SUPPORT
1399 int i;
1400
/* Segment 0 is the unmapped sk_buff head; the fragment pages kmap'ed by
 * vboxNetFltLinuxSkBufToSG start at segment 1 (this assumes no runt-padding
 * segment was appended after the fragments). */
1401 for (i = 1; i < pSG->cSegsUsed; i++)
1402 {
1403 printk("kunmap(%p)\n", pSG->aSegs[i].pv);
1404 kunmap(pSG->aSegs[i].pv);
1405 }
1406#endif
1407 NOREF(pSG);
1408}
1409
1410#ifdef LOG_ENABLED
1411/**
1412 * Logging helper.
1413 */
1414static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
1415{
1416 uint8_t *pInt, *pExt;
1417 static int iPacketNo = 1;
1418 iPacketNo += iIncrement;
1419 if (fEgress)
1420 {
1421 pExt = pSG->aSegs[0].pv;
1422 pInt = pExt + 6;
1423 }
1424 else
1425 {
1426 pInt = pSG->aSegs[0].pv;
1427 pExt = pInt + 6;
1428 }
1429 Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
1430 " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
1431 pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
1432 fEgress ? "-->" : "<--", pszWhere,
1433 pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
1434 pSG->cbTotal, iPacketNo));
1435 Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
1436}
1437#else
1438# define vboxNetFltDumpPacket(a, b, c, d) do {} while (0)
1439#endif
1440
1441#ifdef VBOXNETFLT_WITH_GSO_RECV
1442
1443/**
1444 * Worker for vboxNetFltLinuxForwardToIntNet that checks whether we can forward a
1445 * GSO socket buffer without having to segment it.
1446 *
1447 * @returns true on success, false if needs segmenting.
1448 * @param pThis The net filter instance.
1449 * @param pSkb The GSO socket buffer.
1450 * @param fSrc The source.
1451 * @param pGsoCtx Where to return the GSO context on success.
1452 */
1453static bool vboxNetFltLinuxCanForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc,
1454 PPDMNETWORKGSO pGsoCtx)
1455{
1456 PDMNETWORKGSOTYPE enmGsoType;
1457 uint16_t uEtherType;
1458 unsigned int cbTransport;
1459 unsigned int offTransport;
1460 unsigned int cbTransportHdr;
1461 unsigned uProtocol;
1462 union
1463 {
1464 RTNETIPV4 IPv4;
1465 RTNETIPV6 IPv6;
1466 RTNETTCP Tcp;
1467 uint8_t ab[40];
1468 uint16_t au16[40/2];
1469 uint32_t au32[40/4];
1470 } Buf;
1471
1472 /*
1473 * Check the GSO properties of the socket buffer and make sure it fits.
1474 */
1475 /** @todo Figure out how to handle SKB_GSO_TCP_ECN! */
1476 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCPV6 | SKB_GSO_TCPV4) ))
1477 {
1478 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_type=%#x\n", skb_shinfo(pSkb)->gso_type));
1479 return false;
1480 }
1481 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_size < 1
1482 || pSkb->len > VBOX_MAX_GSO_SIZE ))
1483 {
1484 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_size=%#x skb_len=%#x (max=%#x)\n", skb_shinfo(pSkb)->gso_size, pSkb->len, VBOX_MAX_GSO_SIZE));
1485 return false;
1486 }
1487 /*
1488 * It is possible to receive GSO packets from the wire if GRO is enabled.
1489 */
1490 if (RT_UNLIKELY(fSrc & INTNETTRUNKDIR_WIRE))
1491 {
1492 Log5(("vboxNetFltLinuxCanForwardAsGso: fSrc=wire\n"));
1493#ifdef VBOXNETFLT_WITH_GRO
1494 /*
1495 * The packet came from the wire and the driver has already consumed
1496 * mac header. We need to restore it back.
1497 */
1498 pSkb->mac_len = skb_network_header(pSkb) - skb_mac_header(pSkb);
1499 skb_push(pSkb, pSkb->mac_len);
1500 Log5(("vboxNetFltLinuxCanForwardAsGso: mac_len=%d data=%p mac_header=%p network_header=%p\n",
1501 pSkb->mac_len, pSkb->data, skb_mac_header(pSkb), skb_network_header(pSkb)));
1502#else /* !VBOXNETFLT_WITH_GRO */
1503 /* Older kernels didn't have GRO. */
1504 return false;
1505#endif /* !VBOXNETFLT_WITH_GRO */
1506 }
1507 else
1508 {
1509 /*
1510 * skb_gso_segment does the following. Do we need to do it as well?
1511 */
1512#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1513 skb_reset_mac_header(pSkb);
1514 pSkb->mac_len = pSkb->network_header - pSkb->mac_header;
1515#else
1516 pSkb->mac.raw = pSkb->data;
1517 pSkb->mac_len = pSkb->nh.raw - pSkb->data;
1518#endif
1519 }
1520
1521 /*
1522 * Switch on the ethertype.
1523 */
1524 uEtherType = pSkb->protocol;
1525 if ( uEtherType == RT_H2N_U16_C(RTNET_ETHERTYPE_VLAN)
1526 && pSkb->mac_len == sizeof(RTNETETHERHDR) + sizeof(uint32_t))
1527 {
1528 uint16_t const *puEtherType = skb_header_pointer(pSkb, sizeof(RTNETETHERHDR) + sizeof(uint16_t), sizeof(uint16_t), &Buf);
1529 if (puEtherType)
1530 uEtherType = *puEtherType;
1531 }
1532 switch (uEtherType)
1533 {
1534 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV4):
1535 {
1536 unsigned int cbHdr;
1537 PCRTNETIPV4 pIPv4 = (PCRTNETIPV4)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv4), &Buf);
1538 if (RT_UNLIKELY(!pIPv4))
1539 {
1540 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv4 hdr\n"));
1541 return false;
1542 }
1543
1544 cbHdr = pIPv4->ip_hl * 4;
1545 cbTransport = RT_N2H_U16(pIPv4->ip_len);
1546 if (RT_UNLIKELY( cbHdr < RTNETIPV4_MIN_LEN
1547 || cbHdr > cbTransport ))
1548 {
1549 Log5(("vboxNetFltLinuxCanForwardAsGso: invalid IPv4 lengths: ip_hl=%u ip_len=%u\n", pIPv4->ip_hl, RT_N2H_U16(pIPv4->ip_len)));
1550 return false;
1551 }
1552 cbTransport -= cbHdr;
1553 offTransport = pSkb->mac_len + cbHdr;
1554 uProtocol = pIPv4->ip_p;
1555 if (uProtocol == RTNETIPV4_PROT_TCP)
1556 enmGsoType = PDMNETWORKGSOTYPE_IPV4_TCP;
1557 else if (uProtocol == RTNETIPV4_PROT_UDP)
1558 enmGsoType = PDMNETWORKGSOTYPE_IPV4_UDP;
1559 else /** @todo IPv6: 4to6 tunneling */
1560 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1561 break;
1562 }
1563
1564 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV6):
1565 {
1566 PCRTNETIPV6 pIPv6 = (PCRTNETIPV6)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv6), &Buf);
1567 if (RT_UNLIKELY(!pIPv6))
1568 {
1569 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv6 hdr\n"));
1570 return false;
1571 }
1572
1573 cbTransport = RT_N2H_U16(pIPv6->ip6_plen);
1574 offTransport = pSkb->mac_len + sizeof(RTNETIPV6);
1575 uProtocol = pIPv6->ip6_nxt;
1576 /** @todo IPv6: Dig our way out of the other headers. */
1577 if (uProtocol == RTNETIPV4_PROT_TCP)
1578 enmGsoType = PDMNETWORKGSOTYPE_IPV6_TCP;
1579 else if (uProtocol == RTNETIPV4_PROT_UDP)
1580 enmGsoType = PDMNETWORKGSOTYPE_IPV6_UDP;
1581 else
1582 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
1583 break;
1584 }
1585
1586 default:
1587 Log5(("vboxNetFltLinuxCanForwardAsGso: uEtherType=%#x\n", RT_H2N_U16(uEtherType)));
1588 return false;
1589 }
1590
1591 if (enmGsoType == PDMNETWORKGSOTYPE_INVALID)
1592 {
1593 Log5(("vboxNetFltLinuxCanForwardAsGso: Unsupported protocol %d\n", uProtocol));
1594 return false;
1595 }
1596
1597 if (RT_UNLIKELY( offTransport + cbTransport <= offTransport
1598 || offTransport + cbTransport > pSkb->len
1599 || cbTransport < (uProtocol == RTNETIPV4_PROT_TCP ? RTNETTCP_MIN_LEN : RTNETUDP_MIN_LEN)) )
1600 {
1601 Log5(("vboxNetFltLinuxCanForwardAsGso: Bad transport length; off=%#x + cb=%#x => %#x; skb_len=%#x (%s)\n",
1602 offTransport, cbTransport, offTransport + cbTransport, pSkb->len, PDMNetGsoTypeName(enmGsoType) ));
1603 return false;
1604 }
1605
1606 /*
1607 * Check the TCP/UDP bits.
1608 */
1609 if (uProtocol == RTNETIPV4_PROT_TCP)
1610 {
1611 PCRTNETTCP pTcp = (PCRTNETTCP)skb_header_pointer(pSkb, offTransport, sizeof(Buf.Tcp), &Buf);
1612 if (RT_UNLIKELY(!pTcp))
1613 {
1614 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access TCP hdr\n"));
1615 return false;
1616 }
1617
1618 cbTransportHdr = pTcp->th_off * 4;
1619 if (RT_UNLIKELY( cbTransportHdr < RTNETTCP_MIN_LEN
1620 || cbTransportHdr > cbTransport
1621 || offTransport + cbTransportHdr >= UINT8_MAX
1622 || offTransport + cbTransportHdr >= pSkb->len ))
1623 {
1624 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for TCP header; off=%#x cb=%#x skb_len=%#x\n", offTransport, cbTransportHdr, pSkb->len));
1625 return false;
1626 }
1627
1628 }
1629 else
1630 {
1631 Assert(uProtocol == RTNETIPV4_PROT_UDP);
1632 cbTransportHdr = sizeof(RTNETUDP);
1633 if (RT_UNLIKELY( offTransport + cbTransportHdr >= UINT8_MAX
1634 || offTransport + cbTransportHdr >= pSkb->len ))
1635 {
1636 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for UDP header; off=%#x skb_len=%#x\n", offTransport, pSkb->len));
1637 return false;
1638 }
1639 }
1640
1641 /*
1642 * We're good, init the GSO context.
1643 */
1644 pGsoCtx->u8Type = enmGsoType;
1645 pGsoCtx->cbHdrs = offTransport + cbTransportHdr;
1646 pGsoCtx->cbMaxSeg = skb_shinfo(pSkb)->gso_size;
1647 pGsoCtx->offHdr1 = pSkb->mac_len;
1648 pGsoCtx->offHdr2 = offTransport;
1649 pGsoCtx->au8Unused[0] = 0;
1650 pGsoCtx->au8Unused[1] = 0;
1651
1652 return true;
1653}
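/* Worked example (assumed sizes): a TCP/IPv4 GSO buffer with a plain Ethernet
 * header and minimal 20-byte IP and TCP headers yields offHdr1 = 14,
 * offHdr2 = 34, cbHdrs = 54 and cbMaxSeg = gso_size (the MSS), i.e. every
 * segment cut from this context carries at most 54 + MSS bytes. */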
1654
1655/**
1656 * Forward the socket buffer as a GSO internal network frame.
1657 *
1658 * @returns IPRT status code.
1659 * @param pThis The net filter instance.
1660 * @param pSkb The GSO socket buffer.
1661 * @param fSrc The source.
1662 * @param pGsoCtx The GSO context set up by vboxNetFltLinuxCanForwardAsGso.
1663 */
1664static int vboxNetFltLinuxForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
1665{
1666 int rc;
1667 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pSkb);
1668 if (RT_LIKELY(cSegs <= MAX_SKB_FRAGS + 1))
1669 {
1670 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1671 if (RT_LIKELY(pSG))
1672 {
1673 vboxNetFltLinuxSkBufToSG(pThis, pSkb, pSG, cSegs, fSrc, pGsoCtx);
1674
1675 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1676 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1677
1678 vboxNetFltLinuxDestroySG(pSG);
1679 rc = VINF_SUCCESS;
1680 }
1681 else
1682 {
1683 Log(("VBoxNetFlt: Dropping the sk_buff (failure case).\n"));
1684 rc = VERR_NO_MEMORY;
1685 }
1686 }
1687 else
1688 {
1689 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1690 rc = VERR_INTERNAL_ERROR_3;
1691 }
1692
1693 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1694 dev_kfree_skb(pSkb);
1695 return rc;
1696}
1697
1698#endif /* VBOXNETFLT_WITH_GSO_RECV */
1699
1700/**
1701 * Worker for vboxNetFltLinuxForwardToIntNet.
1702 *
 1703 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_INTERNAL_ERROR_3.
1704 * @param pThis The net filter instance.
1705 * @param pBuf The socket buffer.
1706 * @param fSrc The source.
1707 */
1708static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
1709{
1710 int rc;
1711 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pBuf);
1712 if (cSegs <= MAX_SKB_FRAGS + 1)
1713 {
1714 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
1715 if (RT_LIKELY(pSG))
1716 {
1717 if (fSrc & INTNETTRUNKDIR_WIRE)
1718 {
1719 /*
1720                  * The packet came from the wire and the device driver has
1721                  * already stripped the Ethernet header. Restore it.
1722 */
1723 skb_push(pBuf, ETH_HLEN);
1724 }
1725
1726 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc, NULL /*pGsoCtx*/);
1727
1728 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
1729 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, fSrc);
1730
1731 vboxNetFltLinuxDestroySG(pSG);
1732 rc = VINF_SUCCESS;
1733 }
1734 else
1735 {
1736 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
1737 rc = VERR_NO_MEMORY;
1738 }
1739 }
1740 else
1741 {
1742 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
1743 rc = VERR_INTERNAL_ERROR_3;
1744 }
1745
1746 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
1747 dev_kfree_skb(pBuf);
1748 return rc;
1749}
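
/*
 * Editor's aside (hedged sketch): for frames delivered by a packet_type
 * handler the device driver has already pulled the Ethernet header, i.e.
 * skb->data points at the network header while the MAC header still sits in
 * the buffer in front of it. skb_push() above simply moves skb->data back:
 */
#if 0
# include <linux/skbuff.h>
# include <linux/if_ether.h>
static void vboxNetFltExampleRestoreEthHdr(struct sk_buff *pSkb)
{
    /* Assumes the full Ethernet header is still present in front of skb->data. */
    skb_push(pSkb, ETH_HLEN);   /* skb->data -= ETH_HLEN; skb->len += ETH_HLEN */
}
#endif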
1750
1751/**
 1752 * Forwards a socket buffer to the internal network, segmenting GSO frames as needed.
 1753 * @param pBuf The socket buffer. This is consumed by this function.
1754 */
1755static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
1756{
1757 uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;
1758
1759#ifdef VBOXNETFLT_WITH_GSO
1760 if (skb_is_gso(pBuf))
1761 {
1762 PDMNETWORKGSO GsoCtx;
1763 Log3(("vboxNetFltLinuxForwardToIntNet: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x ip_summed=%d\n",
1764 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, pBuf->ip_summed));
1765# ifdef VBOXNETFLT_WITH_GSO_RECV
1766 if ( (skb_shinfo(pBuf)->gso_type & (SKB_GSO_UDP | SKB_GSO_TCPV6 | SKB_GSO_TCPV4))
1767 && vboxNetFltLinuxCanForwardAsGso(pThis, pBuf, fSrc, &GsoCtx) )
1768 vboxNetFltLinuxForwardAsGso(pThis, pBuf, fSrc, &GsoCtx);
1769 else
1770# endif
1771 {
1772 /* Need to segment the packet */
1773 struct sk_buff *pNext;
1774 struct sk_buff *pSegment = skb_gso_segment(pBuf, 0 /*supported features*/);
1775 if (IS_ERR(pSegment))
1776 {
1777 dev_kfree_skb(pBuf);
1778 LogRel(("VBoxNetFlt: Failed to segment a packet (%d).\n", PTR_ERR(pSegment)));
1779 return;
1780 }
1781
1782 for (; pSegment; pSegment = pNext)
1783 {
1784 Log3(("vboxNetFltLinuxForwardToIntNet: segment len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
1785 pSegment->len, pSegment->data_len, pSegment->truesize, pSegment->next, skb_shinfo(pSegment)->nr_frags, skb_shinfo(pSegment)->gso_size, skb_shinfo(pSegment)->gso_segs, skb_shinfo(pSegment)->gso_type, skb_shinfo(pSegment)->frag_list, pSegment->pkt_type));
1786 pNext = pSegment->next;
1787 pSegment->next = 0;
1788 vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
1789 }
1790 dev_kfree_skb(pBuf);
1791 }
1792 }
1793 else
1794#endif /* VBOXNETFLT_WITH_GSO */
1795 {
1796 if (pBuf->ip_summed == CHECKSUM_PARTIAL && pBuf->pkt_type == PACKET_OUTGOING)
1797 {
1798#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1799 /*
1800 * Try to work around the problem with CentOS 4.7 and 5.2 (2.6.9
1801              * and 2.6.18 kernels): they pass a wrong 'h' pointer down. We take the IP
1802              * header length from the header itself and reconstruct the 'h' pointer
1803              * to point at the TCP (or whatever) header.
1804 */
1805 unsigned char *tmp = pBuf->h.raw;
1806 if (pBuf->h.raw == pBuf->nh.raw && pBuf->protocol == htons(ETH_P_IP))
1807 pBuf->h.raw = pBuf->nh.raw + pBuf->nh.iph->ihl * 4;
1808#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1809 if (VBOX_SKB_CHECKSUM_HELP(pBuf))
1810 {
1811 LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
1812 dev_kfree_skb(pBuf);
1813 return;
1814 }
1815#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
1816 /* Restore the original (wrong) pointer. */
1817 pBuf->h.raw = tmp;
1818#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
1819 }
1820 vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
1821 }
1822}
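
/*
 * Editor's aside (hedged sketch): on 2.6.19+ kernels completing a
 * CHECKSUM_PARTIAL skb in software boils down to skb_checksum_help(), which
 * is what the VBOX_SKB_CHECKSUM_HELP macro expands to there:
 */
#if 0
# include <linux/skbuff.h>
static int vboxNetFltExampleFinishCsum(struct sk_buff *pSkb)
{
    if (pSkb->ip_summed == CHECKSUM_PARTIAL)
        return skb_checksum_help(pSkb); /* 0 on success, negative errno on failure */
    return 0;
}
#endif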
1823
1824#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
1825/**
1826 * Work queue handler that forwards the socket buffers queued by
1827 * vboxNetFltLinuxPacketHandler to the internal network.
1828 *
1829 * @param pWork The work queue.
1830 */
1831# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1832static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
1833# else
1834static void vboxNetFltLinuxXmitTask(void *pWork)
1835# endif
1836{
1837 PVBOXNETFLTINS pThis = VBOX_FLT_XT_TO_INST(pWork);
1838 struct sk_buff *pBuf;
1839
1840 Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
1841
1842 /*
1843 * Active? Retain the instance and increment the busy counter.
1844 */
1845 if (vboxNetFltTryRetainBusyActive(pThis))
1846 {
1847 while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != NULL)
1848 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
1849
1850 vboxNetFltRelease(pThis, true /* fBusy */);
1851 }
1852 else
1853 {
1854 /** @todo Shouldn't we just drop the packets here? There is little point in
1855 * making them accumulate when the VM is paused and it'll only waste
1856          * kernel memory anyway... Hmm, maybe wait a short while (2-5 secs)
1857          * before starting to drain the packets (goes for the intnet ring buf
1858 * too)? */
1859 }
1860}
1861#endif /* !VBOXNETFLT_LINUX_NO_XMIT_QUEUE */
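
/*
 * Editor's aside (hedged sketch, hypothetical names): the queue-and-defer
 * pattern used by the xmit task above in its minimal form. A producer queues
 * skbs and schedules a work item; the handler drains the queue in process
 * context. The queue and work item would be set up elsewhere with
 * skb_queue_head_init() and INIT_WORK().
 */
#if 0
# include <linux/workqueue.h>
# include <linux/skbuff.h>
static struct sk_buff_head g_ExampleQueue;
static struct work_struct  g_ExampleWork;

static void exampleWorker(struct work_struct *pWork)
{
    struct sk_buff *pSkb;
    while ((pSkb = skb_dequeue(&g_ExampleQueue)) != NULL)
        dev_kfree_skb(pSkb);            /* a real handler would forward the skb */
}

static void exampleEnqueue(struct sk_buff *pSkb)
{
    skb_queue_tail(&g_ExampleQueue, pSkb);
    schedule_work(&g_ExampleWork);      /* exampleWorker runs later in process context */
}
#endif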
1862
1863/**
1864 * Reports the GSO capabilities of the hardware NIC.
1865 *
 1866 * @param pThis The net filter instance. The caller holds a
 1867 *        reference to this.
1868 */
1869static void vboxNetFltLinuxReportNicGsoCapabilities(PVBOXNETFLTINS pThis)
1870{
1871#ifdef VBOXNETFLT_WITH_GSO_XMIT_WIRE
1872 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
1873 {
1874 struct net_device *pDev;
1875 PINTNETTRUNKSWPORT pSwitchPort;
1876 unsigned int fFeatures;
1877 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1878
1879 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1880
1881         pSwitchPort = pThis->pSwitchPort; /* this doesn't need to be here, but it does no harm. */
1882 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
1883 if (pDev)
1884 fFeatures = pDev->features;
1885 else
1886 fFeatures = 0;
1887
1888 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1889
1890 if (pThis->pSwitchPort)
1891 {
1892 /* Set/update the GSO capabilities of the NIC. */
1893             uint32_t fGsoCapabilities = 0;
1894             if (fFeatures & NETIF_F_TSO)
1895                 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP);
1896             if (fFeatures & NETIF_F_TSO6)
1897                 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP);
1898# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
1899             if (fFeatures & NETIF_F_UFO)
1900                 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP);
1901             if (fFeatures & NETIF_F_UFO)
1902                 fGsoCapabilities |= RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP);
1903# endif
1904             pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort, fGsoCapabilities, INTNETTRUNKDIR_WIRE);
1905 }
1906
1907 vboxNetFltRelease(pThis, true /*fBusy*/);
1908 }
1909#endif /* VBOXNETFLT_WITH_GSO_XMIT_WIRE */
1910}
1911
1912/**
 1913 * Helper that determines whether the host (ignoring us) is operating the
1914 * interface in promiscuous mode or not.
1915 */
1916static bool vboxNetFltLinuxPromiscuous(PVBOXNETFLTINS pThis)
1917{
1918 bool fRc = false;
1919 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
1920 if (pDev)
1921 {
1922 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
1923 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
1924 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
1925 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1926 }
1927 return fRc;
1928}
1929
1930#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1931/**
1932 * Helper for detecting TAP devices.
1933 */
1934static bool vboxNetFltIsTapDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
1935{
1936 if (pDev->ethtool_ops && pDev->ethtool_ops->get_drvinfo)
1937 {
1938 struct ethtool_drvinfo Info;
1939
1940 memset(&Info, 0, sizeof(Info));
1941 Info.cmd = ETHTOOL_GDRVINFO;
1942 pDev->ethtool_ops->get_drvinfo(pDev, &Info);
1943 Log3(("vboxNetFltIsTapDevice: driver=%s version=%s bus_info=%s\n",
1944 Info.driver, Info.version, Info.bus_info));
1945
1946 return !strncmp(Info.driver, "tun", 4)
1947 && !strncmp(Info.bus_info, "tap", 4);
1948 }
1949
1950 return false;
1951}
1952
1953/**
1954 * Helper for updating the link state of TAP devices.
1955 * Only TAP devices are affected.
1956 */
1957static void vboxNetFltSetTapLinkState(PVBOXNETFLTINS pThis, struct net_device *pDev, bool fLinkUp)
1958{
1959 if (vboxNetFltIsTapDevice(pThis, pDev))
1960 {
1961 Log3(("vboxNetFltSetTapLinkState: bringing %s tap device link state\n",
1962 fLinkUp ? "up" : "down"));
1963 netif_tx_lock_bh(pDev);
1964 if (fLinkUp)
1965 netif_carrier_on(pDev);
1966 else
1967 netif_carrier_off(pDev);
1968 netif_tx_unlock_bh(pDev);
1969 }
1970}
1971#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
1972DECLINLINE(void) vboxNetFltSetTapLinkState(PVBOXNETFLTINS pThis, struct net_device *pDev, bool fLinkUp)
1973{
1974 /* Nothing to do for pre-2.6.36 kernels. */
1975}
1976#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) */
1977
1978/**
1979 * Internal worker for vboxNetFltLinuxNotifierCallback.
1980 *
1981 * @returns VBox status code.
1982 * @param pThis The instance.
 1983 * @param pDev The net device to attach to, as handed to us by the
 1984 *        netdevice notifier.
1985 */
1986static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
1987{
1988 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1989 LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));
1990
1991 /*
1992 * Retain and store the device.
1993 */
1994 dev_hold(pDev);
1995
1996 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1997 ASMAtomicUoWritePtr(&pThis->u.s.pDev, pDev);
1998 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
1999
2000 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n",
2001 pDev, pDev->name,
2002#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2003 netdev_refcnt_read(pDev)
2004#else
2005 atomic_read(&pDev->refcnt)
2006#endif
2007 ));
2008 Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2009 pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
2010
2011 /* Get the mac address while we still have a valid net_device reference. */
2012 memcpy(&pThis->u.s.MacAddr, pDev->dev_addr, sizeof(pThis->u.s.MacAddr));
2013
2014 /*
2015 * Install a packet filter for this device with a protocol wildcard (ETH_P_ALL).
2016 */
2017 pThis->u.s.PacketType.type = __constant_htons(ETH_P_ALL);
2018 pThis->u.s.PacketType.dev = pDev;
2019 pThis->u.s.PacketType.func = vboxNetFltLinuxPacketHandler;
2020 dev_add_pack(&pThis->u.s.PacketType);
2021
2022#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2023 vboxNetFltLinuxHookDev(pThis, pDev);
2024#endif
2025#ifdef VBOXNETFLT_WITH_QDISC
2026 vboxNetFltLinuxQdiscInstall(pThis, pDev);
2027#endif /* VBOXNETFLT_WITH_QDISC */
2028
2029 /*
2030      * If attaching to a TAP interface we need to bring the link state up
2031      * ourselves, starting with 2.6.36 kernels.
2032 */
2033 vboxNetFltSetTapLinkState(pThis, pDev, true);
2034
2035 /*
2036      * Set the indicators that require the spinlock. Be a bit paranoid about
2037      * racing the device notification handler.
2038 */
2039 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2040 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2041 if (pDev)
2042 {
2043 ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
2044 ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
2045 pDev = NULL; /* don't dereference it */
2046 }
2047 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2048 Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));
2049
2050 /*
2051 * If the above succeeded report GSO capabilities, if not undo and
2052 * release the device.
2053 */
2054 if (!pDev)
2055 {
2056 Assert(pThis->pSwitchPort);
2057 if (vboxNetFltTryRetainBusyNotDisconnected(pThis))
2058 {
2059 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2060 pThis->pSwitchPort->pfnReportMacAddress(pThis->pSwitchPort, &pThis->u.s.MacAddr);
2061 pThis->pSwitchPort->pfnReportPromiscuousMode(pThis->pSwitchPort, vboxNetFltLinuxPromiscuous(pThis));
2062 pThis->pSwitchPort->pfnReportNoPreemptDsts(pThis->pSwitchPort, INTNETTRUNKDIR_WIRE | INTNETTRUNKDIR_HOST);
2063 vboxNetFltRelease(pThis, true /*fBusy*/);
2064 }
2065 }
2066 else
2067 {
2068#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2069 vboxNetFltLinuxUnhookDev(pThis, pDev);
2070#endif
2071#ifdef VBOXNETFLT_WITH_QDISC
2072 vboxNetFltLinuxQdiscRemove(pThis, pDev);
2073#endif /* VBOXNETFLT_WITH_QDISC */
2074 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2075 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2076 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2077 dev_put(pDev);
2078 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n",
2079 pDev, pDev->name,
2080#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2081 netdev_refcnt_read(pDev)
2082#else
2083 atomic_read(&pDev->refcnt)
2084#endif
2085 ));
2086 }
2087
2088 LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.MacAddr), &pThis->u.s.MacAddr));
2089 return VINF_SUCCESS;
2090}
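
/*
 * Editor's aside (hedged sketch, hypothetical handler): the minimal form of
 * the packet tap installed above -- a protocol-wildcard packet_type whose
 * func is invoked for every frame seen on the bound device. The handler
 * signature matches 2.6.14+ kernels; leaving .dev unset would tap all devices.
 */
#if 0
# include <linux/netdevice.h>
# include <linux/if_ether.h>
static int examplePacketHandler(struct sk_buff *pSkb, struct net_device *pDev,
                                struct packet_type *pPt, struct net_device *pOrigDev)
{
    dev_kfree_skb(pSkb);                /* a real handler would inspect/forward it */
    return 0;
}

static struct packet_type g_ExamplePacketType =
{
    .type = __constant_htons(ETH_P_ALL),
    .func = examplePacketHandler,
};
/* dev_add_pack(&g_ExamplePacketType); ... dev_remove_pack(&g_ExamplePacketType); */
#endif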
2091
2092
2093static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
2094{
2095 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2096
2097 Assert(!pThis->fDisconnectedFromHost);
2098
2099#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2100 vboxNetFltLinuxUnhookDev(pThis, pDev);
2101#endif
2102#ifdef VBOXNETFLT_WITH_QDISC
2103 vboxNetFltLinuxQdiscRemove(pThis, pDev);
2104#endif /* VBOXNETFLT_WITH_QDISC */
2105
2106 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2107 ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
2108 ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
2109 ASMAtomicUoWriteNullPtr(&pThis->u.s.pDev);
2110 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2111
2112 dev_remove_pack(&pThis->u.s.PacketType);
2113#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2114 skb_queue_purge(&pThis->u.s.XmitQueue);
2115#endif
2116 Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2117 Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n",
2118 pDev, pDev->name,
2119#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2120 netdev_refcnt_read(pDev)
2121#else
2122 atomic_read(&pDev->refcnt)
2123#endif
2124 ));
2125 dev_put(pDev);
2126
2127 return NOTIFY_OK;
2128}
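
/*
 * Editor's aside (hedged sketch): net_device reference counting as used in
 * the attach/detach paths above -- every dev_hold() must be balanced by a
 * dev_put(), otherwise the device cannot complete unregistration.
 */
#if 0
# include <linux/netdevice.h>
static void exampleHoldPut(struct net_device *pDev)
{
    dev_hold(pDev);                     /* pin the device while we use it */
    /* ... safely dereference pDev here ... */
    dev_put(pDev);                      /* release; allows unregistration to complete */
}
#endif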
2129
2130static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
2131{
2132 /* Check if we are not suspended and promiscuous mode has not been set. */
2133 if ( pThis->enmTrunkState == INTNETTRUNKIFSTATE_ACTIVE
2134 && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2135 {
2136         /* Note that there is no need for locking as the kernel already holds the lock. */
2137 dev_set_promiscuity(pDev, 1);
2138 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
2139 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2140 }
2141 else
2142 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2143 return NOTIFY_OK;
2144}
2145
2146static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
2147{
2148     /* Undo promiscuous mode if we have set it. */
2149 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
2150 {
2151         /* Note that there is no need for locking as the kernel already holds the lock. */
2152 dev_set_promiscuity(pDev, -1);
2153 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
2154 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2155 }
2156 else
2157 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2158 return NOTIFY_OK;
2159}
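
/*
 * Editor's aside (hedged sketch): dev_set_promiscuity() maintains a nesting
 * count on the device, so the paired +1/-1 calls above coexist with other
 * promiscuous-mode users. On newer kernels it returns an int; the sketch
 * ignores the return value for simplicity.
 */
#if 0
# include <linux/netdevice.h>
static void exampleTogglePromisc(struct net_device *pDev, int fOn)
{
    /* Callers outside a netdevice notifier would take rtnl_lock() first. */
    dev_set_promiscuity(pDev, fOn ? 1 : -1);
}
#endif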
2160
2161#ifdef LOG_ENABLED
2162/** Stringify the NETDEV_XXX constants. */
2163static const char *vboxNetFltLinuxGetNetDevEventName(unsigned long ulEventType)
2164{
2165     const char *pszEvent = "NETDEV_<unknown>";
2166 switch (ulEventType)
2167 {
2168 case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
2169 case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
2170 case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
2171 case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
2172 case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
2173 case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
2174 case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
2175 case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
2176 case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
2177 case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
2178# ifdef NETDEV_FEAT_CHANGE
2179 case NETDEV_FEAT_CHANGE: pszEvent = "NETDEV_FEAT_CHANGE"; break;
2180# endif
2181 }
2182 return pszEvent;
2183}
2184#endif /* LOG_ENABLED */
2185
2186/**
2187 * Callback for listening to netdevice events.
2188 *
 2189 * This handles rediscovery, cleanup on unregistration, promiscuity changes
 2190 * on up/down, and GSO feature changes reported via ethtool.
2191 *
2192 * @returns NOTIFY_OK
2193 * @param self Pointer to our notifier registration block.
2194 * @param ulEventType The event.
2195 * @param ptr Event specific, but it is usually the device it
2196 * relates to.
2197 */
2198static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
2199
2200{
2201 PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);
2202 struct net_device *pDev = (struct net_device *)ptr;
2203 int rc = NOTIFY_OK;
2204
2205 Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
2206 vboxNetFltLinuxGetNetDevEventName(ulEventType), ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *)));
2207 if ( ulEventType == NETDEV_REGISTER
2208 && !strcmp(pDev->name, pThis->szName))
2209 {
2210 vboxNetFltLinuxAttachToInterface(pThis, pDev);
2211 }
2212 else
2213 {
2214 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2215 if (pDev == ptr)
2216 {
2217 switch (ulEventType)
2218 {
2219 case NETDEV_UNREGISTER:
2220 rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
2221 break;
2222 case NETDEV_UP:
2223 rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
2224 break;
2225 case NETDEV_GOING_DOWN:
2226 rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
2227 break;
2228 case NETDEV_CHANGENAME:
2229 break;
2230#ifdef NETDEV_FEAT_CHANGE
2231 case NETDEV_FEAT_CHANGE:
2232 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2233 break;
2234#endif
2235 }
2236 }
2237 }
2238
2239 return rc;
2240}
2241
2242bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
2243{
2244 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
2245}
2246
2247int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, void *pvIfData, PINTNETSG pSG, uint32_t fDst)
2248{
2249 struct net_device * pDev;
2250 int err;
2251 int rc = VINF_SUCCESS;
2252 NOREF(pvIfData);
2253
2254 LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));
2255
2256 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2257 if (pDev)
2258 {
2259 /*
2260 * Create a sk_buff for the gather list and push it onto the wire.
2261 */
2262 if (fDst & INTNETTRUNKDIR_WIRE)
2263 {
2264 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
2265 if (pBuf)
2266 {
2267 vboxNetFltDumpPacket(pSG, true, "wire", 1);
2268 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2269 Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
2270 err = dev_queue_xmit(pBuf);
2271 if (err)
2272 rc = RTErrConvertFromErrno(err);
2273 }
2274 else
2275 rc = VERR_NO_MEMORY;
2276 }
2277
2278 /*
2279 * Create a sk_buff for the gather list and push it onto the host stack.
2280 */
2281 if (fDst & INTNETTRUNKDIR_HOST)
2282 {
2283 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
2284 if (pBuf)
2285 {
2286 vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
2287 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
2288 Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
2289 err = netif_rx_ni(pBuf);
2290 if (err)
2291 rc = RTErrConvertFromErrno(err);
2292 }
2293 else
2294 rc = VERR_NO_MEMORY;
2295 }
2296
2297 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2298 }
2299
2300 return rc;
2301}
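
/*
 * Editor's aside (hedged sketch): the two injection primitives used above.
 * dev_queue_xmit() hands the skb to the device's qdisc for transmission on
 * the wire, while netif_rx_ni() feeds it into the host receive path from
 * non-interrupt context. Both consume the skb.
 */
#if 0
# include <linux/netdevice.h>
static void exampleInject(struct sk_buff *pSkbWire, struct sk_buff *pSkbHost)
{
    dev_queue_xmit(pSkbWire);           /* towards the physical medium */
    netif_rx_ni(pSkbHost);              /* towards the local IP stack  */
}
#endif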
2302
2303
2304void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
2305{
2306 struct net_device * pDev;
2307
2308 LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
2309 pThis, pThis->szName, fActive?"true":"false",
2310 pThis->fDisablePromiscuous?"true":"false"));
2311
2312 if (pThis->fDisablePromiscuous)
2313 return;
2314
2315 pDev = vboxNetFltLinuxRetainNetDev(pThis);
2316 if (pDev)
2317 {
2318 /*
2319          * This API is a bit weird; the best reference is the code.
2320          *
2321          * Also, we have a few race conditions wrt the maintenance of the
2322          * host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
2323 */
2324#ifdef LOG_ENABLED
2325 u_int16_t fIf;
2326 unsigned const cPromiscBefore = pDev->promiscuity;
2327#endif
2328 if (fActive)
2329 {
2330 Assert(!pThis->u.s.fPromiscuousSet);
2331
2332 rtnl_lock();
2333 dev_set_promiscuity(pDev, 1);
2334 rtnl_unlock();
2335 pThis->u.s.fPromiscuousSet = true;
2336 Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2337 }
2338 else
2339 {
2340 if (pThis->u.s.fPromiscuousSet)
2341 {
2342 rtnl_lock();
2343 dev_set_promiscuity(pDev, -1);
2344 rtnl_unlock();
2345 Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
2346 }
2347 pThis->u.s.fPromiscuousSet = false;
2348
2349#ifdef LOG_ENABLED
2350 fIf = dev_get_flags(pDev);
2351 Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, pDev->promiscuity));
2352#endif
2353 }
2354
2355 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
2356 }
2357}
2358
2359
2360int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
2361{
2362#ifdef VBOXNETFLT_WITH_QDISC
2363 vboxNetFltLinuxQdiscRemove(pThis, NULL);
2364#endif /* VBOXNETFLT_WITH_QDISC */
2365 /*
2366      * Remove the packet handler when we get disconnected from the internal
2367      * switch, as we don't want it to forward packets to a disconnected switch.
2368 */
2369 dev_remove_pack(&pThis->u.s.PacketType);
2370 return VINF_SUCCESS;
2371}
2372
2373
2374int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
2375{
2376 /*
2377 * Report the GSO capabilities of the host and device (if connected).
2378 * Note! No need to mark ourselves busy here.
2379 */
2380 /** @todo duplicate work here now? Attach */
2381#if defined(VBOXNETFLT_WITH_GSO_XMIT_HOST)
2382 pThis->pSwitchPort->pfnReportGsoCapabilities(pThis->pSwitchPort,
2383 0
2384 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_TCP)
2385 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_TCP)
2386# if 0 /** @todo GSO: Test UDP offloading (UFO) on linux. */
2387 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV4_UDP)
2388 | RT_BIT_32(PDMNETWORKGSOTYPE_IPV6_UDP)
2389# endif
2390 , INTNETTRUNKDIR_HOST);
2391
2392#endif
2393 vboxNetFltLinuxReportNicGsoCapabilities(pThis);
2394
2395 return VINF_SUCCESS;
2396}
2397
2398
2399void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
2400{
2401 struct net_device *pDev;
2402 bool fRegistered;
2403 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2404
2405#ifdef VBOXNETFLT_WITH_FILTER_HOST2GUEST_SKBS_EXPERIMENT
2406 vboxNetFltLinuxUnhookDev(pThis, NULL);
2407#endif
2408
2409 /** @todo This code may race vboxNetFltLinuxUnregisterDevice (very very
2410 * unlikely, but none the less). Since it doesn't actually update the
2411 * state (just reads it), it is likely to panic in some interesting
2412 * ways. */
2413
2414 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
2415 pDev = ASMAtomicUoReadPtrT(&pThis->u.s.pDev, struct net_device *);
2416 fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
2417 RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
2418
2419 if (fRegistered)
2420 {
2421 vboxNetFltSetTapLinkState(pThis, pDev, false);
2422
2423#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2424 skb_queue_purge(&pThis->u.s.XmitQueue);
2425#endif
2426 Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
2427 Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n",
2428 pDev, pDev->name,
2429#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
2430 netdev_refcnt_read(pDev)
2431#else
2432 atomic_read(&pDev->refcnt)
2433#endif
2434 ));
2435 dev_put(pDev);
2436 }
2437 Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
2438 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2439 module_put(THIS_MODULE);
2440}
2441
2442
2443int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
2444{
2445 int err;
2446 NOREF(pvContext);
2447
2448 pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
2449 err = register_netdevice_notifier(&pThis->u.s.Notifier);
2450 if (err)
2451 return VERR_INTNET_FLT_IF_FAILED;
2452 if (!pThis->u.s.fRegistered)
2453 {
2454 unregister_netdevice_notifier(&pThis->u.s.Notifier);
2455 LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
2456 return VERR_INTNET_FLT_IF_NOT_FOUND;
2457 }
2458
2459 Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
2460 if ( pThis->fDisconnectedFromHost
2461 || !try_module_get(THIS_MODULE))
2462 return VERR_INTNET_FLT_IF_FAILED;
2463
2464 return VINF_SUCCESS;
2465}
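
/*
 * Editor's aside (hedged sketch): register_netdevice_notifier() replays
 * NETDEV_REGISTER (and NETDEV_UP) events for interfaces that already exist,
 * which is why the fRegistered check right after registration above can tell
 * whether the named interface was found. The minimal registration form:
 */
#if 0
# include <linux/netdevice.h>
# include <linux/notifier.h>
static int exampleNotifier(struct notifier_block *pSelf, unsigned long ulEvent, void *pvDev)
{
    return NOTIFY_OK;                   /* a real callback would dispatch on ulEvent */
}

static struct notifier_block g_ExampleNotifier = { .notifier_call = exampleNotifier };
/* register_netdevice_notifier(&g_ExampleNotifier); ... unregister_netdevice_notifier(&g_ExampleNotifier); */
#endif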
2466
2467int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
2468{
2469 /*
2470 * Init the linux specific members.
2471 */
2472 pThis->u.s.pDev = NULL;
2473 pThis->u.s.fRegistered = false;
2474 pThis->u.s.fPromiscuousSet = false;
2475 memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
2476#ifndef VBOXNETFLT_LINUX_NO_XMIT_QUEUE
2477 skb_queue_head_init(&pThis->u.s.XmitQueue);
2478# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
2479 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
2480# else
2481 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
2482# endif
2483#endif
2484
2485 return VINF_SUCCESS;
2486}
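
/*
 * Editor's aside (hedged sketch): the 2.6.20 INIT_WORK() API change handled
 * above. From 2.6.20 on the handler receives the work_struct itself and
 * typically recovers its containing object via container_of(), which is
 * effectively what the VBOX_FLT_XT_TO_INST macro does for the xmit task.
 */
#if 0
# include <linux/workqueue.h>
# include <linux/kernel.h>
struct example_ctx
{
    struct work_struct Work;
    int                iPayload;
};

static void exampleWorkHandler(struct work_struct *pWork)
{
    struct example_ctx *pCtx = container_of(pWork, struct example_ctx, Work);
    (void)pCtx->iPayload;               /* use the containing context */
}
/* INIT_WORK(&pCtx->Work, exampleWorkHandler); schedule_work(&pCtx->Work); */
#endif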
2487
2488
2489void vboxNetFltPortOsNotifyMacAddress(PVBOXNETFLTINS pThis, void *pvIfData, PCRTMAC pMac)
2490{
2491 NOREF(pThis); NOREF(pvIfData); NOREF(pMac);
2492}
2493
2494
2495int vboxNetFltPortOsConnectInterface(PVBOXNETFLTINS pThis, void *pvIf, void **pvIfData)
2496{
2497 /* Nothing to do */
2498 NOREF(pThis); NOREF(pvIf); NOREF(pvIfData);
2499 return VINF_SUCCESS;
2500}
2501
2502
2503int vboxNetFltPortOsDisconnectInterface(PVBOXNETFLTINS pThis, void *pvIfData)
2504{
2505 /* Nothing to do */
2506 NOREF(pThis); NOREF(pvIfData);
2507 return VINF_SUCCESS;
2508}
2509