VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@ 28153

Last change on this file since 28153 was 28153, checked in by vboxsync, 15 years ago

VBoxNetFlt-linux: Receive GSO frames from the host, saving calls into SrvIntNet as well as ring-0 <-> ring-3 context switches on the DrvIntNet receive thread.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 47.2 KB
1/* $Id: VBoxNetFlt-linux.c 28153 2010-04-09 17:58:51Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#include "the-linux-kernel.h"
26#include "version-generated.h"
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/rtnetlink.h>
30#include <linux/miscdevice.h>
31#include <linux/ip.h>
32
33#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
34#include <VBox/log.h>
35#include <VBox/err.h>
36#include <VBox/intnetinline.h>
37#include <VBox/pdmnetinline.h>
38#include <VBox/param.h>
39#include <iprt/alloca.h>
40#include <iprt/assert.h>
41#include <iprt/spinlock.h>
42#include <iprt/semaphore.h>
43#include <iprt/initterm.h>
44#include <iprt/process.h>
45#include <iprt/mem.h>
46#include <iprt/net.h>
47#include <iprt/log.h>
48#include <iprt/mp.h>
49#include <iprt/mem.h>
50#include <iprt/time.h>
51
52#define VBOXNETFLT_OS_SPECFIC 1
53#include "../VBoxNetFltInternal.h"
54
55
56/*******************************************************************************
57* Defined Constants And Macros *
58*******************************************************************************/
59#define VBOX_FLT_NB_TO_INST(pNB) ((PVBOXNETFLTINS)((uint8_t *)pNB - RT_OFFSETOF(VBOXNETFLTINS, u.s.Notifier)))
60#define VBOX_FLT_PT_TO_INST(pPT) ((PVBOXNETFLTINS)((uint8_t *)pPT - RT_OFFSETOF(VBOXNETFLTINS, u.s.PacketType)))
61#define VBOX_FLT_XT_TO_INST(pXT) ((PVBOXNETFLTINS)((uint8_t *)pXT - RT_OFFSETOF(VBOXNETFLTINS, u.s.XmitTask)))
62
63#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
64# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
65# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
66#else
67# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
68# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
69#endif
70
71#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
72# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
73#else
74# define CHECKSUM_PARTIAL CHECKSUM_HW
75# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
76# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
77# else
78# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
79# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
80# else
81# define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
82# endif
83# endif
84#endif
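/* A note on the ladder above: it papers over signature changes in
 * skb_checksum_help() -- 2.6.19+ takes just the skb, 2.6.10..2.6.18 take an
 * extra argument, 2.6.7..2.6.9 take a pointer to the skb, and older kernels
 * use the inverted return convention, hence the negation. For kernels before
 * 2.6.19, CHECKSUM_PARTIAL is likewise mapped back to its old name,
 * CHECKSUM_HW. */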
85
86#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
87/** Indicates that the linux kernel may send us GSO frames. */
88# define VBOXNETFLT_WITH_GSO 1
89#endif
90
91#ifndef NET_IP_ALIGN
92# define NET_IP_ALIGN 2
93#endif
94
95#if 0
96/** Create scatter / gather segments for fragments. When not used, we will
97 * linearize the socket buffer before creating the internal networking SG. */
98# define VBOXNETFLT_SG_SUPPORT 1
99#endif
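/* With VBOXNETFLT_SG_SUPPORT left disabled (as above), the packet handler
 * below copies each sk_buff with skb_copy() before forwarding, i.e. the
 * buffer is linearized instead of having its page fragments kmap()'ed. */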
100
101
102/*******************************************************************************
103* Internal Functions *
104*******************************************************************************/
105static int VBoxNetFltLinuxInit(void);
106static void VBoxNetFltLinuxUnload(void);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112/**
113 * The (common) global data.
114 */
115static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
116
117module_init(VBoxNetFltLinuxInit);
118module_exit(VBoxNetFltLinuxUnload);
119
120MODULE_AUTHOR("Sun Microsystems, Inc.");
121MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
122MODULE_LICENSE("GPL");
123#ifdef MODULE_VERSION
124MODULE_VERSION(VBOX_VERSION_STRING " (" RT_XSTR(INTNETTRUNKIFPORT_VERSION) ")");
125#endif
126
127
128#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) && defined(LOG_ENABLED)
129unsigned dev_get_flags(const struct net_device *dev)
130{
131 unsigned flags;
132
133 flags = (dev->flags & ~(IFF_PROMISC |
134 IFF_ALLMULTI |
135 IFF_RUNNING)) |
136 (dev->gflags & (IFF_PROMISC |
137 IFF_ALLMULTI));
138
139 if (netif_running(dev) && netif_carrier_ok(dev))
140 flags |= IFF_RUNNING;
141
142 return flags;
143}
144#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
145
146
147/**
148 * Initialize module.
149 *
150 * @returns appropriate status code.
151 */
152static int __init VBoxNetFltLinuxInit(void)
153{
154 int rc;
155 /*
156 * Initialize IPRT.
157 */
158 rc = RTR0Init(0);
159 if (RT_SUCCESS(rc))
160 {
161 Log(("VBoxNetFltLinuxInit\n"));
162
163 /*
164 * Initialize the globals and connect to the support driver.
165 *
166 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
167 * for establishing the connection to the support driver.
168 */
169 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
170 rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
171 if (RT_SUCCESS(rc))
172 {
173 LogRel(("VBoxNetFlt: Successfully started.\n"));
174 return 0;
175 }
176
177 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
178 RTR0Term();
179 }
180 else
181 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
182
183 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
184 return -RTErrConvertToErrno(rc);
185}
186
187
188/**
189 * Unload the module.
190 *
191 * @todo We have to prevent this if we're busy!
192 */
193static void __exit VBoxNetFltLinuxUnload(void)
194{
195 int rc;
196 Log(("VBoxNetFltLinuxUnload\n"));
197 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
198
199 /*
200 * Undo the work done during start (in reverse order).
201 */
202 rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
203 AssertRC(rc); NOREF(rc);
204
205 RTR0Term();
206
207 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
208
209 Log(("VBoxNetFltLinuxUnload - done\n"));
210}
211
212
213/**
214 * Reads and retains the host interface handle.
215 *
216 * @returns The handle, NULL if detached.
217 * @param pThis The instance.
218 */
219DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
220{
221#if 0
222 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
223 struct net_device *pDev = NULL;
224
225 Log(("vboxNetFltLinuxRetainNetDev\n"));
226 /*
227 * Be careful here to avoid problems racing the detached callback.
228 */
229 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
230 if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
231 {
232 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
233 if (pDev)
234 {
235 dev_hold(pDev);
236 Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
237 }
238 }
239 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
240
241 Log(("vboxNetFltLinuxRetainNetDev - done\n"));
242 return pDev;
243#else
244 return (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
245#endif
246}
247
248
249/**
250 * Release the host interface handle previously retained
251 * by vboxNetFltLinuxRetainNetDev.
252 *
253 * @param pThis The instance.
254 * @param pDev The device returned by
255 * vboxNetFltLinuxRetainNetDev; NULL is fine.
256 */
257DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
258{
259#if 0
260 Log(("vboxNetFltLinuxReleaseNetDev\n"));
261 NOREF(pThis);
262 if (pDev)
263 {
264 dev_put(pDev);
265 Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
266 }
267 Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
268#endif
269}
270
271#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
272#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))
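/* VBOXNETFLT_CB_TAG() builds a 32-bit marker from the constant 0xA1C90000
 * and the low 16 bits of the device ifindex (e.g. ifindex 3 yields
 * 0xA1C90003); VBOXNETFLT_SKB_TAG() maps it onto the last four bytes of the
 * sk_buff control buffer (skb->cb). Frames injected by
 * vboxNetFltLinuxSkBufFromSG carry this tag so the packet handler can
 * recognize and drop its own frames again. */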
273
274/**
275 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
276 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
277 *
278 * @returns true / false accordingly.
279 * @param pBuf The sk_buff.
280 */
281DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
282{
283 return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
284}
285
286
287/**
288 * Internal worker that creates a Linux sk_buff for a
289 * (scatter/)gather list.
290 *
291 * @returns Pointer to the sk_buff.
292 * @param pThis The instance.
293 * @param pSG The (scatter/)gather list.
294 * @param fDstWire Set if the frame is destined for the wire. */
295static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
296{
297 struct sk_buff *pPkt;
298 struct net_device *pDev;
299
300 if (pSG->cbTotal == 0)
301 {
302 LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
303 return NULL;
304 }
305
306 /*
307 * Allocate a packet and copy over the data.
308 */
309 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
310 pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
311 if (RT_UNLIKELY(!pPkt))
312 {
313 Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
314 pSG->pvUserData = NULL;
315 return NULL;
316 }
317
318 pPkt->dev = pDev;
319
320 /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
321 skb_reserve(pPkt, NET_IP_ALIGN);
322
323 /* Copy the segments. */
324 skb_put(pPkt, pSG->cbTotal);
325 INTNETSgRead(pSG, pPkt->data);
326
327 /* Set protocol and packet_type fields. */
328 pPkt->protocol = eth_type_trans(pPkt, pDev);
329 pPkt->ip_summed = CHECKSUM_NONE;
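 /* Note that eth_type_trans() has advanced pPkt->data past the ethernet
  * header, which is why the header is pushed back below for frames that
  * are destined for the wire. */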
330 if (fDstWire)
331 {
332 VBOX_SKB_RESET_NETWORK_HDR(pPkt);
333 /* Restore ethernet header back. */
334 skb_push(pPkt, ETH_HLEN);
335 VBOX_SKB_RESET_MAC_HDR(pPkt);
336 }
337 VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
338
339 return pPkt;
340}
341
342
343/**
344 * Initializes an SG list from an sk_buff.
345 *
346 * @param pThis The instance.
347 * @param pBuf The sk_buff.
348 * @param pSG The SG.
349 * @param cSegs The number of segments allocated for the SG.
350 * This should match the number of segments in
351 * the sk_buff exactly!
352 * @param fSrc The source of the frame.
353 * @param pGsoCtx Pointer to the GSO context if it's a GSO
354 * internal network frame. NULL if regular
355 * frame.
356 */
357DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG,
358 unsigned cSegs, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
359{
360 int i;
361 NOREF(pThis);
362
363 Assert(!skb_shinfo(pBuf)->frag_list);
364
365 if (fSrc & INTNETTRUNKDIR_WIRE)
366 {
367 /*
368 * The packet came from the wire; the ethernet header was removed by the device driver.
369 * Restore it.
370 */
371 skb_push(pBuf, ETH_HLEN);
372 }
373
374 if (!pGsoCtx)
375 INTNETSgInitTempSegs(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/);
376 else
377 INTNETSgInitTempSegsGso(pSG, pBuf->len, cSegs, 0 /*cSegsUsed*/, pGsoCtx);
378
379#ifdef VBOXNETFLT_SG_SUPPORT
380 pSG->aSegs[0].cb = skb_headlen(pBuf);
381 pSG->aSegs[0].pv = pBuf->data;
382 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
383
384 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
385 {
386 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
387 pSG->aSegs[i+1].cb = pFrag->size;
388 pSG->aSegs[i+1].pv = kmap(pFrag->page);
389 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
390 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
391 }
392 ++i;
393
394#else
395 pSG->aSegs[0].cb = pBuf->len;
396 pSG->aSegs[0].pv = pBuf->data;
397 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
398 i = 1;
399#endif
400
401 pSG->cSegsUsed = i;
402
403#ifdef PADD_RUNT_FRAMES_FROM_HOST
404 /*
405 * Add a trailer if the frame is too small.
406 *
407 * Since we're getting to the packet before it is framed, it has not
408 * yet been padded. The current solution is to add a segment pointing
409 * to a buffer containing all zeros and pray that works for all frames...
410 */
411 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
412 {
413 static uint8_t const s_abZero[128] = {0};
414
415 AssertReturnVoid(i < cSegs);
416
417 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
418 pSG->aSegs[i].pv = (void *)&s_abZero[0];
419 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
420 pSG->cbTotal = 60;
421 pSG->cSegsUsed++;
422 Assert(i + 1 <= pSG->cSegsAlloc);
423 }
424#endif
425
426 Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
427 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
428 for (i = 0; i < pSG->cSegsUsed; i++)
429 Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
430 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
431}
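/* Resulting segment layout: without VBOXNETFLT_SG_SUPPORT, aSegs[0] covers
 * the whole (linearized) frame; with it, aSegs[0] is the linear head and
 * aSegs[1..nr_frags] are the kmap()'ed page fragments. With
 * PADD_RUNT_FRAMES_FROM_HOST, one extra all-zero segment may be appended to
 * pad host frames to the 60-byte ethernet minimum. */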
432
433/**
434 * Packet handler; called by the kernel for each packet on the interface.
435 *
436 * @returns 0 (the return value is ignored by the kernel).
437 * @param pBuf The sk_buff.
438 * @param pSkbDev The device the packet arrived on.
439 * @param pPacketType The packet_type instance this handler was
440 * registered with; used to locate our instance.
441 * @param pOrigDev The original device (kernels 2.6.14 and later only).
442 */
443#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
444static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
445 struct net_device *pSkbDev,
446 struct packet_type *pPacketType,
447 struct net_device *pOrigDev)
448#else
449static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
450 struct net_device *pSkbDev,
451 struct packet_type *pPacketType)
452#endif
453{
454 PVBOXNETFLTINS pThis;
455 struct net_device *pDev;
456 LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
457 pBuf, pSkbDev, pPacketType));
458#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
459 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
460 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
461#else
462 Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
463 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
464#endif
465 /*
466 * Drop it immediately?
467 */
468 if (!pBuf)
469 return 0;
470
471 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
472 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
473 if (pDev != pSkbDev)
474 {
475 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
476 return 0;
477 }
478
479 Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
480 if (vboxNetFltLinuxSkBufIsOur(pBuf))
481 {
482 Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
483 dev_kfree_skb(pBuf);
484 return 0;
485 }
486
487#ifndef VBOXNETFLT_SG_SUPPORT
488 {
489 /*
490 * Get rid of fragmented packets, they cause too much trouble.
491 */
492 struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
493 kfree_skb(pBuf);
494 if (!pCopy)
495 {
496 LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
497 return 0;
498 }
499 pBuf = pCopy;
500# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
501 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
502 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
503# else
504 Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
505 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
506# endif
507 }
508#endif
509
510 /* Add the packet to transmit queue and schedule the bottom half. */
511 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
512 schedule_work(&pThis->u.s.XmitTask);
513 Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
514 &pThis->u.s.XmitTask, pBuf));
515
516 /* It does not really matter what we return, it is ignored by the kernel. */
517 return 0;
518}
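/* The handler defers all real work to vboxNetFltLinuxXmitTask via the system
 * work queue: protocol handlers run in atomic (non-blocking) context, whereas
 * forwarding into the internal network is done from the worker. */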
519
520/**
521 * Calculate the number of INTNETSEG segments the socket buffer will need.
522 *
523 * @returns Segment count.
524 * @param pBuf The socket buffer.
525 */
526DECLINLINE(unsigned) vboxNetFltLinuxCalcSGSegments(struct sk_buff *pBuf)
527{
528#ifdef VBOXNETFLT_SG_SUPPORT
529 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
530#else
531 unsigned cSegs = 1;
532#endif
533#ifdef PADD_RUNT_FRAMES_FROM_HOST
534 /* vboxNetFltLinuxSkBufToSG adds a padding segment if it's a runt. */
535 if (pBuf->len < 60)
536 cSegs++;
537#endif
538 return cSegs;
539}
540
541/**
542 * Destroy the intnet scatter / gather buffer created by
543 * vboxNetFltLinuxSkBufToSG.
544 */
545static void vboxNetFltLinuxDestroySG(PINTNETSG pSG, struct sk_buff *pBuf)
546{
547#ifdef VBOXNETFLT_SG_SUPPORT
548 int i;
549
550 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
551 {
552 printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
553 kunmap(skb_shinfo(pBuf)->frags[i].page);
554 }
555#endif
556 NOREF(pSG); NOREF(pBuf);
557}
558
559#ifndef LOG_ENABLED
560# define VBOXNETFLT_DUMP_PACKET(a, b, c, d) do {} while (0)
561#else
562static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
563{
564 uint8_t *pInt, *pExt;
565 static int iPacketNo = 1;
566 iPacketNo += iIncrement;
567 if (fEgress)
568 {
569 pExt = pSG->aSegs[0].pv;
570 pInt = pExt + 6;
571 }
572 else
573 {
574 pInt = pSG->aSegs[0].pv;
575 pExt = pInt + 6;
576 }
577 Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
578 " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
579 pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
580 fEgress ? "-->" : "<--", pszWhere,
581 pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
582 pSG->cbTotal, iPacketNo));
583 Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
584}
585#endif /* LOG_ENABLED */
586
587#ifdef VBOXNETFLT_WITH_GSO
588
589/**
590 * Worker for vboxNetFltLinuxForwardToIntNet that checks whether we can forward a
591 * GSO socket buffer without having to segment it.
592 *
593 * @returns true on success, false if needs segmenting.
594 * @param pThis The net filter instance.
595 * @param pSkb The GSO socket buffer.
596 * @param fSrc The source.
597 * @param pGsoCtx Where to return the GSO context on success.
598 */
599static bool vboxNetFltLinuxCanForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc,
600 PPDMNETWORKGSO pGsoCtx)
601{
602 PDMNETWORKGSOTYPE enmGsoType;
603 uint16_t uEtherType;
604 unsigned int cbTransport;
605 unsigned int offTransport;
606 unsigned int cbTransportHdr;
607 unsigned uProtocol;
608 union
609 {
610 RTNETIPV4 IPv4;
611 RTNETIPV6 IPv6;
612 RTNETTCP Tcp;
613 uint8_t ab[40];
614 uint16_t au16[40/2];
615 uint32_t au32[40/4];
616 } Buf;
617
618 /*
619 * Check the GSO properties of the socket buffer and make sure it fits.
620 */
621 /** @todo Figure out how to handle SKB_GSO_TCP_ECN! */
622 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCPV6 | SKB_GSO_TCPV4) ))
623 {
624 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_type=%#x\n", skb_shinfo(pSkb)->gso_type));
625 return false;
626 }
627 if (RT_UNLIKELY( skb_shinfo(pSkb)->gso_size < 1
628 || pSkb->len > VBOX_MAX_GSO_SIZE ))
629 {
630 Log5(("vboxNetFltLinuxCanForwardAsGso: gso_size=%#x skb_len=%#x (max=%#x)\n", skb_shinfo(pSkb)->gso_size, pSkb->len, VBOX_MAX_GSO_SIZE));
631 return false;
632 }
633 if (RT_UNLIKELY(fSrc & INTNETTRUNKDIR_WIRE))
634 {
635 Log5(("vboxNetFltLinuxCanForwardAsGso: fSrc=wire\n"));
636 return false;
637 }
638
639 /*
640 * skb_gso_segment does the following. Do we need to do it as well?
641 */
642 skb_reset_mac_header(pSkb);
643 pSkb->mac_len = pSkb->network_header - pSkb->mac_header;
644
645 /*
646 * Switch on the ethertype.
647 */
648 uEtherType = pSkb->protocol;
649 if ( uEtherType == RT_H2N_U16_C(RTNET_ETHERTYPE_VLAN)
650 && pSkb->mac_len == sizeof(RTNETETHERHDR) + sizeof(uint32_t))
651 {
652 uint16_t const *puEtherType = skb_header_pointer(pSkb, sizeof(RTNETETHERHDR) + sizeof(uint16_t), sizeof(uint16_t), &Buf);
653 if (puEtherType)
654 uEtherType = *puEtherType;
655 }
656 switch (uEtherType)
657 {
658 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV4):
659 {
660 unsigned int cbHdr;
661 PCRTNETIPV4 pIPv4 = (PCRTNETIPV4)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv4), &Buf);
662 if (RT_UNLIKELY(!pIPv4))
663 {
664 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv4 hdr\n"));
665 return false;
666 }
667
668 cbHdr = pIPv4->ip_hl * 4;
669 cbTransport = RT_N2H_U16(pIPv4->ip_len);
670 if (RT_UNLIKELY( cbHdr < RTNETIPV4_MIN_LEN
671 || cbHdr > cbTransport ))
672 {
673 Log5(("vboxNetFltLinuxCanForwardAsGso: invalid IPv4 lengths: ip_hl=%u ip_len=%u\n", pIPv4->ip_hl, RT_N2H_U16(pIPv4->ip_len)));
674 return false;
675 }
676 cbTransport -= cbHdr;
677 offTransport = pSkb->mac_len + cbHdr;
678 uProtocol = pIPv4->ip_p;
679 if (uProtocol == RTNETIPV4_PROT_TCP)
680 enmGsoType = PDMNETWORKGSOTYPE_IPV4_TCP;
681 else if (uProtocol == RTNETIPV4_PROT_UDP)
682 enmGsoType = PDMNETWORKGSOTYPE_IPV4_UDP;
683 else /** @todo IPv6: 4to6 tunneling */
684 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
685 break;
686 }
687
688 case RT_H2N_U16_C(RTNET_ETHERTYPE_IPV6):
689 {
690 PCRTNETIPV6 pIPv6 = (PCRTNETIPV6)skb_header_pointer(pSkb, pSkb->mac_len, sizeof(Buf.IPv6), &Buf);
691 if (RT_UNLIKELY(!pIPv6))
692 {
693 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access IPv6 hdr\n"));
694 return false;
695 }
696
697 cbTransport = RT_N2H_U16(pIPv6->ip6_plen);
698 offTransport = pSkb->mac_len + sizeof(RTNETIPV6);
699 uProtocol = pIPv6->ip6_nxt;
700 /** @todo IPv6: Dig our way out of the other headers. */
701 if (uProtocol == RTNETIPV4_PROT_TCP)
702 enmGsoType = PDMNETWORKGSOTYPE_IPV6_TCP;
703 else if (uProtocol == RTNETIPV4_PROT_UDP)
704 enmGsoType = PDMNETWORKGSOTYPE_IPV6_UDP;
705 else
706 enmGsoType = PDMNETWORKGSOTYPE_INVALID;
707 break;
708 }
709
710 default:
711 Log5(("vboxNetFltLinuxCanForwardAsGso: uEtherType=%#x\n", RT_H2N_U16(uEtherType)));
712 return false;
713 }
714
715 if (enmGsoType == PDMNETWORKGSOTYPE_INVALID)
716 {
717 Log5(("vboxNetFltLinuxCanForwardAsGso: Unsupported protocol %d\n", uProtocol));
718 return false;
719 }
720
721 if (RT_UNLIKELY( offTransport + cbTransport <= offTransport
722 || offTransport + cbTransport > pSkb->len
723 || cbTransport < (uProtocol == RTNETIPV4_PROT_TCP ? RTNETTCP_MIN_LEN : RTNETUDP_MIN_LEN)) )
724 {
725 Log5(("vboxNetFltLinuxCanForwardAsGso: Bad transport length; off=%#x + cb=%#x => %#x; skb_len=%#x (%s)\n",
726 offTransport, cbTransport, offTransport + cbTransport, pSkb->len, PDMNetGsoTypeName(enmGsoType) ));
727 return false;
728 }
729
730 /*
731 * Check the TCP/UDP bits.
732 */
733 if (uProtocol == RTNETIPV4_PROT_TCP)
734 {
735 PCRTNETTCP pTcp = (PCRTNETTCP)skb_header_pointer(pSkb, offTransport, sizeof(Buf.Tcp), &Buf);
736 if (RT_UNLIKELY(!pTcp))
737 {
738 Log5(("vboxNetFltLinuxCanForwardAsGso: failed to access TCP hdr\n"));
739 return false;
740 }
741
742 cbTransportHdr = pTcp->th_off * 4;
743 if (RT_UNLIKELY( cbTransportHdr < RTNETTCP_MIN_LEN
744 || cbTransportHdr > cbTransport
745 || offTransport + cbTransportHdr >= UINT8_MAX
746 || offTransport + cbTransportHdr >= pSkb->len ))
747 {
748 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for TCP header; off=%#x cb=%#x skb_len=%#x\n", offTransport, cbTransportHdr, pSkb->len));
749 return false;
750 }
751
752 }
753 else
754 {
755 Assert(uProtocol == RTNETIPV4_PROT_UDP);
756 cbTransportHdr = sizeof(RTNETUDP);
757 if (RT_UNLIKELY( offTransport + cbTransportHdr >= UINT8_MAX
758 || offTransport + cbTransportHdr >= pSkb->len ))
759 {
760 Log5(("vboxNetFltLinuxCanForwardAsGso: No space for UDP header; off=%#x skb_len=%#x\n", offTransport, pSkb->len));
761 return false;
762 }
763 }
764
765 /*
766 * We're good, init the GSO context.
767 */
768 pGsoCtx->u8Type = enmGsoType;
769 pGsoCtx->cbHdrs = offTransport + cbTransportHdr;
770 pGsoCtx->cbMaxSeg = skb_shinfo(pSkb)->gso_size;
771 pGsoCtx->offHdr1 = pSkb->mac_len;
772 pGsoCtx->offHdr2 = offTransport;
773 pGsoCtx->au8Unused[0] = 0;
774 pGsoCtx->au8Unused[1] = 0;
775
776 return true;
777}
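/* Worked example for a plain TCP/IPv4 frame (no VLAN, no IP/TCP options):
 * mac_len = 14 so offHdr1 = 14; cbHdr = 20 so offTransport = offHdr2 = 34;
 * cbTransportHdr = 20 so cbHdrs = 54; cbMaxSeg is the skb's gso_size. */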
778
779/**
780 * Forward the socket buffer as a GSO internal network frame.
781 *
782 * @returns IPRT status code.
783 * @param pThis The net filter instance.
784 * @param pSkb The GSO socket buffer.
785 * @param fSrc The source.
786 * @param pGsoCtx The GSO context created by vboxNetFltLinuxCanForwardAsGso.
787 */
788static int vboxNetFltLinuxForwardAsGso(PVBOXNETFLTINS pThis, struct sk_buff *pSkb, uint32_t fSrc, PCPDMNETWORKGSO pGsoCtx)
789{
790 int rc;
791 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pSkb);
792 if (RT_LIKELY(cSegs <= MAX_SKB_FRAGS + 1))
793 {
794 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
795 if (RT_LIKELY(pSG))
796 {
797 vboxNetFltLinuxSkBufToSG(pThis, pSkb, pSG, cSegs, fSrc, pGsoCtx);
798
799 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
800 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
801
802 vboxNetFltLinuxDestroySG(pSG, pSkb);
803 rc = VINF_SUCCESS;
804 }
805 else
806 {
807 Log(("VBoxNetFlt: Dropping the sk_buff (failure case).\n"));
808 rc = VERR_NO_MEMORY;
809 }
810 }
811 else
812 {
813 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
814 rc = VERR_INTERNAL_ERROR_3;
815 }
816
817 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
818 dev_kfree_skb(pSkb);
819 return rc;
820}
821
822#endif /* VBOXNETFLT_WITH_GSO */
823
824/**
825 * Worker for vboxNetFltLinuxForwardToIntNet.
826 *
827 * @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_INTERNAL_ERROR_3.
828 * @param pThis The net filter instance.
829 * @param pBuf The socket buffer.
830 * @param fSrc The source.
831 */
832static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
833{
834 int rc;
835 unsigned cSegs = vboxNetFltLinuxCalcSGSegments(pBuf);
836 if (cSegs <= MAX_SKB_FRAGS + 1)
837 {
838 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
839 if (RT_LIKELY(pSG))
840 {
841 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc, NULL /*pGsoCtx*/);
842
843 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
844 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
845
846 vboxNetFltLinuxDestroySG(pSG, pBuf);
847 rc = VINF_SUCCESS;
848 }
849 else
850 {
851 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
852 rc = VERR_NO_MEMORY;
853 }
854 }
855 else
856 {
857 Log(("VBoxNetFlt: Bad sk_buff? cSegs=%#x.\n", cSegs));
858 rc = VERR_INTERNAL_ERROR_3;
859 }
860
861 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
862 dev_kfree_skb(pBuf);
863 return rc;
864}
865
866static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
867{
868 uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;
869
870#ifdef VBOXNETFLT_WITH_GSO
871 if (skb_is_gso(pBuf))
872 {
873 PDMNETWORKGSO GsoCtx;
874 Log3(("vboxNetFltLinuxForwardToIntNet: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x ip_summed=%d\n",
875 pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, pBuf->ip_summed));
876 if ( (skb_shinfo(pBuf)->gso_type & (SKB_GSO_UDP | SKB_GSO_TCPV6 | SKB_GSO_TCPV4))
877 && vboxNetFltLinuxCanForwardAsGso(pThis, pBuf, fSrc, &GsoCtx) )
878 vboxNetFltLinuxForwardAsGso(pThis, pBuf, fSrc, &GsoCtx);
879 else
880 {
881 /* Need to segment the packet */
882 struct sk_buff *pNext;
883 struct sk_buff *pSegment = skb_gso_segment(pBuf, 0 /*supported features*/);
884 if (IS_ERR(pSegment))
885 {
886 dev_kfree_skb(pBuf);
887 LogRel(("VBoxNetFlt: Failed to segment a packet (%d).\n", PTR_ERR(pSegment)));
888 return;
889 }
890
891 for (; pSegment; pSegment = pNext)
892 {
893 Log3(("vboxNetFltLinuxForwardToIntNet: segment len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
894 pSegment->len, pSegment->data_len, pSegment->truesize, pSegment->next, skb_shinfo(pSegment)->nr_frags, skb_shinfo(pSegment)->gso_size, skb_shinfo(pSegment)->gso_segs, skb_shinfo(pSegment)->gso_type, skb_shinfo(pSegment)->frag_list, pSegment->pkt_type));
895 pNext = pSegment->next;
896 pSegment->next = 0;
897 vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
898 }
899 dev_kfree_skb(pBuf);
900 }
901 }
902 else
903#endif /* VBOXNETFLT_WITH_GSO */
904 {
905 if (pBuf->ip_summed == CHECKSUM_PARTIAL && pBuf->pkt_type == PACKET_OUTGOING)
906 {
907#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
908 /*
909 * Try to work around the problem with CentOS 4.7 and 5.2 (2.6.9
910 * and 2.6.18 kernels): they pass the wrong 'h' pointer down. We take the IP
911 * header length from the header itself and reconstruct the 'h' pointer
912 * to the TCP (or whatever) header.
913 */
914 unsigned char *tmp = pBuf->h.raw;
915 if (pBuf->h.raw == pBuf->nh.raw && pBuf->protocol == htons(ETH_P_IP))
916 pBuf->h.raw = pBuf->nh.raw + pBuf->nh.iph->ihl * 4;
917#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
918 if (VBOX_SKB_CHECKSUM_HELP(pBuf))
919 {
920 LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
921 dev_kfree_skb(pBuf);
922 return;
923 }
924#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
925 /* Restore the original (wrong) pointer. */
926 pBuf->h.raw = tmp;
927#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) */
928 }
929 vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
930 }
931}
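/* Summary of the forwarding policy above: GSO buffers that the internal
 * network can digest are handed over whole together with a PDMNETWORKGSO
 * descriptor (the optimization introduced by this revision); everything else
 * is cut up with skb_gso_segment() and forwarded segment by segment. Non-GSO
 * buffers with a pending checksum get it computed first via
 * VBOX_SKB_CHECKSUM_HELP(). */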
932
933/**
934 * Work queue handler that forwards the socket buffers queued by
935 * vboxNetFltLinuxPacketHandler to the internal network.
936 *
937 * @param pWork The work queue item.
938 */
939#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
940static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
941#else
942static void vboxNetFltLinuxXmitTask(void *pWork)
943#endif
944{
945 PVBOXNETFLTINS pThis = VBOX_FLT_XT_TO_INST(pWork);
946 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
947 struct sk_buff *pBuf;
948
949 Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
950
951 /*
952 * Active? Retain the instance and increment the busy counter.
953 */
954 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
955 if (ASMAtomicUoReadBool(&pThis->fActive))
956 {
957 vboxNetFltRetain(pThis, true /* fBusy */);
958 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
959
960 while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != NULL)
961 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
962
963 vboxNetFltRelease(pThis, true /* fBusy */);
964 }
965 else
966 {
967 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
968 /** @todo Shouldn't we just drop the packets here? There is little point in
969 * making them accumulate when the VM is paused and it'll only waste
970 * kernel memory anyway... Hmm. maybe wait a short while (2-5 secs)
971 * before starting to drain the packets (goes for the intnet ring buf
972 * too)? */
973 }
974}
975
976/**
977 * Internal worker that attaches the filter instance to the given
978 * network device (called for NETDEV_REGISTER events).
979 *
980 * @returns VBox status code.
981 * @param pThis The instance.
982 * @param pDev The network device to attach to.
983 */
984static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
985{
986 struct packet_type *pt;
987 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
988
989 LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));
990
991 if (!pDev)
992 {
993 Log(("VBoxNetFlt: failed to find device '%s'\n", pThis->szName));
994 return VERR_INTNET_FLT_IF_NOT_FOUND;
995 }
996
997 dev_hold(pDev);
998 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
999 ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, pDev);
1000 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1001
1002 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1003 Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
1004 /*
1005 * Get the mac address while we still have a valid ifnet reference.
1006 */
1007 memcpy(&pThis->u.s.Mac, pDev->dev_addr, sizeof(pThis->u.s.Mac));
1008
1009 pt = &pThis->u.s.PacketType;
1010 pt->type = __constant_htons(ETH_P_ALL);
1011 pt->dev = pDev;
1012 pt->func = vboxNetFltLinuxPacketHandler;
1013 dev_add_pack(pt);
1014 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1015 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
1016 if (pDev)
1017 {
1018 ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
1019 ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
1020 pDev = NULL; /* don't dereference it */
1021 }
1022 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1023 Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));
1024
1025 /* Release the interface on failure. */
1026 if (pDev)
1027 {
1028 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1029 ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
1030 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1031 dev_put(pDev);
1032 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1033 }
1034
1035 LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
1036 return VINF_SUCCESS;
1037}
1038
1039
1040static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
1041{
1042 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1043
1044 Assert(!pThis->fDisconnectedFromHost);
1045 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1046 ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
1047 ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
1048 ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
1049 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1050
1051 dev_remove_pack(&pThis->u.s.PacketType);
1052 skb_queue_purge(&pThis->u.s.XmitQueue);
1053 Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
1054 Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1055 dev_put(pDev);
1056
1057 return NOTIFY_OK;
1058}
1059
1060static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
1061{
1062 /* Check if we are not suspended and promiscuous mode has not been set. */
1063 if (ASMAtomicUoReadBool(&pThis->fActive) && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
1064 {
1065 /* Note that there is no need for locking as the kernel got hold of the lock already. */
1066 dev_set_promiscuity(pDev, 1);
1067 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
1068 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
1069 }
1070 else
1071 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
1072 return NOTIFY_OK;
1073}
1074
1075static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
1076{
1077 /* Undo promiscuous mode if we set it. */
1078 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
1079 {
1080 /* Note that there is no need for locking as the kernel got hold of the lock already. */
1081 dev_set_promiscuity(pDev, -1);
1082 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
1083 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
1084 }
1085 else
1086 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
1087 return NOTIFY_OK;
1088}
1089
1090static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
1091
1092{
1093 int rc = NOTIFY_OK;
1094#ifdef DEBUG
1095 char *pszEvent = "<unknown>";
1096#endif
1097 struct net_device *pDev = (struct net_device *)ptr;
1098 PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);
1099
1100#ifdef DEBUG
1101 switch (ulEventType)
1102 {
1103 case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
1104 case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
1105 case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
1106 case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
1107 case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
1108 case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
1109 case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
1110 case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
1111 case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
1112 case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
1113 }
1114 Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
1115 pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
1116#endif
1117 if (ulEventType == NETDEV_REGISTER && !strcmp(pDev->name, pThis->szName))
1118 {
1119 vboxNetFltLinuxAttachToInterface(pThis, pDev);
1120 }
1121 else
1122 {
1123 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
1124 if (pDev != ptr)
1125 return NOTIFY_OK;
1126 rc = NOTIFY_OK;
1127 switch (ulEventType)
1128 {
1129 case NETDEV_UNREGISTER:
1130 rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
1131 break;
1132 case NETDEV_UP:
1133 rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
1134 break;
1135 case NETDEV_GOING_DOWN:
1136 rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
1137 break;
1138 case NETDEV_CHANGENAME:
1139 break;
1140 }
1141 }
1142
1143 return rc;
1144}
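/* The notifier callback above drives the whole attach/detach life cycle:
 * NETDEV_REGISTER for the watched name triggers
 * vboxNetFltLinuxAttachToInterface, while NETDEV_UNREGISTER, NETDEV_UP and
 * NETDEV_GOING_DOWN are only honored when they refer to the device we are
 * already attached to. */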
1145
1146bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
1147{
1148 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
1149}
1150
1151int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, PINTNETSG pSG, uint32_t fDst)
1152{
1153 struct net_device * pDev;
1154 int err;
1155 int rc = VINF_SUCCESS;
1156
1157 LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));
1158
1159 pDev = vboxNetFltLinuxRetainNetDev(pThis);
1160 if (pDev)
1161 {
1162 /*
1163 * Create a sk_buff for the gather list and push it onto the wire.
1164 */
1165 if (fDst & INTNETTRUNKDIR_WIRE)
1166 {
1167 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
1168 if (pBuf)
1169 {
1170 vboxNetFltDumpPacket(pSG, true, "wire", 1);
1171 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
1172 Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
1173 err = dev_queue_xmit(pBuf);
1174 if (err)
1175 rc = RTErrConvertFromErrno(err);
1176 }
1177 else
1178 rc = VERR_NO_MEMORY;
1179 }
1180
1181 /*
1182 * Create a sk_buff for the gather list and push it onto the host stack.
1183 */
1184 if (fDst & INTNETTRUNKDIR_HOST)
1185 {
1186 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
1187 if (pBuf)
1188 {
1189 vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
1190 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
1191 Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
1192 err = netif_rx_ni(pBuf);
1193 if (err)
1194 rc = RTErrConvertFromErrno(err);
1195 }
1196 else
1197 rc = VERR_NO_MEMORY;
1198 }
1199
1200 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1201 }
1202
1203 return rc;
1204}
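/* Design note: wire-bound frames are sent through dev_queue_xmit(), the
 * device's normal transmit path, while host-bound frames are injected with
 * netif_rx_ni(), the process-context variant of netif_rx() that lets the
 * resulting receive softirq run right away. */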
1205
1206
1207bool vboxNetFltPortOsIsPromiscuous(PVBOXNETFLTINS pThis)
1208{
1209 bool fRc = false;
1210 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
1211 if (pDev)
1212 {
1213 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
1214 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
1215 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
1216 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1217 }
1218 return fRc;
1219}
1220
1221
1222void vboxNetFltPortOsGetMacAddress(PVBOXNETFLTINS pThis, PRTMAC pMac)
1223{
1224 *pMac = pThis->u.s.Mac;
1225}
1226
1227
1228bool vboxNetFltPortOsIsHostMac(PVBOXNETFLTINS pThis, PCRTMAC pMac)
1229{
1230 /* ASSUMES that the MAC address never changes. */
1231 return pThis->u.s.Mac.au16[0] == pMac->au16[0]
1232 && pThis->u.s.Mac.au16[1] == pMac->au16[1]
1233 && pThis->u.s.Mac.au16[2] == pMac->au16[2];
1234}
1235
1236
1237void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
1238{
1239 struct net_device * pDev;
1240
1241 LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
1242 pThis, pThis->szName, fActive?"true":"false",
1243 pThis->fDisablePromiscuous?"true":"false"));
1244
1245 if (pThis->fDisablePromiscuous)
1246 return;
1247
1248 pDev = vboxNetFltLinuxRetainNetDev(pThis);
1249 if (pDev)
1250 {
1251 /*
1252 * This API is a bit weird; the best reference is the code.
1253 *
1254 * Also, we have a bit of a race condition wrt the maintenance of
1255 * the host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
1256 */
1257#ifdef LOG_ENABLED
1258 u_int16_t fIf;
1259 unsigned const cPromiscBefore = pDev->promiscuity;
1260#endif
1261 if (fActive)
1262 {
1263 Assert(!pThis->u.s.fPromiscuousSet);
1264
1265 rtnl_lock();
1266 dev_set_promiscuity(pDev, 1);
1267 rtnl_unlock();
1268 pThis->u.s.fPromiscuousSet = true;
1269 Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
1270 }
1271 else
1272 {
1273 if (pThis->u.s.fPromiscuousSet)
1274 {
1275 rtnl_lock();
1276 dev_set_promiscuity(pDev, -1);
1277 rtnl_unlock();
1278 Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, pDev->promiscuity));
1279 }
1280 pThis->u.s.fPromiscuousSet = false;
1281
1282#ifdef LOG_ENABLED
1283 fIf = dev_get_flags(pDev);
1284 Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, pDev->promiscuity));
1285#endif
1286 }
1287
1288 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1289 }
1290}
1291
1292
1293int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
1294{
1295 /* Nothing to do here. */
1296 return VINF_SUCCESS;
1297}
1298
1299
1300int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
1301{
1302 /* Nothing to do here. */
1303 return VINF_SUCCESS;
1304}
1305
1306
1307void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
1308{
1309 struct net_device *pDev;
1310 bool fRegistered;
1311 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1312
1313 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1314 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
1315 fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
1316 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1317 if (fRegistered)
1318 {
1319 dev_remove_pack(&pThis->u.s.PacketType);
1320 skb_queue_purge(&pThis->u.s.XmitQueue);
1321 Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
1322 Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
1323 dev_put(pDev);
1324 }
1325 Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
1326 unregister_netdevice_notifier(&pThis->u.s.Notifier);
1327 module_put(THIS_MODULE);
1328}
1329
1330
1331int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
1332{
1333 int err;
1334 NOREF(pvContext);
1335
1336 pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
1337 err = register_netdevice_notifier(&pThis->u.s.Notifier);
1338 if (err)
1339 return VERR_INTNET_FLT_IF_FAILED;
1340 if (!pThis->u.s.fRegistered)
1341 {
1342 unregister_netdevice_notifier(&pThis->u.s.Notifier);
1343 LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
1344 return VERR_INTNET_FLT_IF_NOT_FOUND;
1345 }
1346
1347 Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
1348 if ( pThis->fDisconnectedFromHost
1349 || !try_module_get(THIS_MODULE))
1350 return VERR_INTNET_FLT_IF_FAILED;
1351
1352 return VINF_SUCCESS;
1353}
1354
1355int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
1356{
1357 /*
1358 * Init the linux specific members.
1359 */
1360 pThis->u.s.pDev = NULL;
1361 pThis->u.s.fRegistered = false;
1362 pThis->u.s.fPromiscuousSet = false;
1363 memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
1364 skb_queue_head_init(&pThis->u.s.XmitQueue);
1365#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1366 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
1367#else
1368 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
1369#endif
1370
1371 return VINF_SUCCESS;
1372}
1373