VirtualBox

Timestamp:
Apr 27, 2010 2:05:08 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
60745
Message:

vboxNetFltMaybeRediscovered: Don't do rediscovery with preemption disabled.
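
The change amounts to an early-out guard at the top of vboxNetFltMaybeRediscovered: if the caller has preemption disabled, rediscovery is skipped, since the OS-specific rediscovery work may block. A minimal sketch of the added pattern, paraphrased from the hunk below (RTThreadPreemptIsEnabled and NIL_RTTHREAD are real IPRT API; the sketch function and its stub body are illustrative only):

    #include <iprt/thread.h>            /* RTThreadPreemptIsEnabled, NIL_RTTHREAD */
    #include "VBoxNetFltInternal.h"     /* PVBOXNETFLTINS */

    static bool vboxNetFltMaybeRediscoveredSketch(PVBOXNETFLTINS pThis)
    {
        NOREF(pThis);

        /* The OS specific rediscovery may block, so refuse to attempt it
           when the current thread cannot be preempted. */
        if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))    /* NIL_RTTHREAD == current thread */
            return false;

        /* ... the real function continues with the rediscovery logic ... */
        return false;
    }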

File:
1 edited

  • trunk/src/VBox/HostDrivers/VBoxNetFlt/VBoxNetFlt.c

    r28800 r28829  
    1818/** @page pg_netflt     VBoxNetFlt - Network Interface Filter
    1919 *
    20  * This is a kernel module that attaches to a real interface on the host
    21  * and filters and injects packets.
     20 * This is a kernel module that attaches to a real interface on the host and
     21 * filters and injects packets.
    2222 *
    2323 * In the big picture we're one of the three trunk interfaces on the internal
     
    2525 *
    2626 *
    27  * @section  sec_netflt_msc      Locking / Sequence Diagrams
     27 * @section  sec_netflt_locking     Locking and Potential Races
     28 *
     29 * The main challenge here is to make sure the netfilter and internal network
     30 * instances won't be destroyed while someone is calling into them.
     31 *
     32 * The main calls into or out of the filter driver are:
     33 *      - Send.
     34 *      - Async send completion (not implemented yet)
     35 *      - Release by the internal network.
     36 *      - Receive.
     37 *      - Disappearance of the host networking interface.
     38 *      - Reappearance of the host networking interface.
     39 *
     40 * The latter two calls can be caused by driver unloading/loading or the
     41 * device being physically unplugged (e.g. a USB network device).  Actually, the
     42 * unload scenario must fervently be prevented as it will cause panics because the
     43 * internal network will assume the trunk is around until it releases it.
     44 * @todo Need to figure out which hosts allow unloading and block/fix it.
     45 *
     46 * Currently the netfilter instance lives until the internal network releases
     47 * it. So, it is the internal network's responsibility to make sure there are no
     48 * active calls when it releases the trunk and destroys the network.  The
     49 * netfilter assists in this by providing INTNETTRUNKIFPORT::pfnSetState and
     50 * INTNETTRUNKIFPORT::pfnWaitForIdle.  The trunk state is used to enable/disable
     51 * promiscuous mode on the hardware NIC (or similar activation) as well as
     52 * indicating that disconnect is imminent and no further calls shall be made
     53 * into the internal network.  After changing the state to disconnecting and
     54 * prior to invoking INTNETTRUNKIFPORT::pfnDisconnectAndRelease, the internal
     55 * network will use INTNETTRUNKIFPORT::pfnWaitForIdle to wait for any still
     56 * active calls to complete.
     57 *
     58 * The netfilter employs a busy counter and an internal state in addition to the
     59 * public trunk state.  All these variables are protected using a spinlock.
     60 *
     61 *
     62 * @section  sec_netflt_msc     Locking / Sequence Diagrams - OBSOLETE
     63 *
     64 * !OBSOLETE! - THIS WAS THE OLD APPROACH!
    2865 *
    2966 * This section contains a few sequence diagrams describing the problematic
     
    3269 * The thing that makes it all a bit problematic is that multiple events may
    3370 * happen at the same time, and that we have to be very careful to avoid
    34  * deadlocks caused by mixing our locks with the ones in the host kernel.
    35  * The main events are receive, send, async send completion, disappearance of
    36  * the host networking interface and it's reappearance. The latter two events
    37  * are can be caused by driver unloading/loading or the device being physical
     71 * deadlocks caused by mixing our locks with the ones in the host kernel. The
     72 * main events are receive, send, async send completion, disappearance of the
     73 * host networking interface and its reappearance.  The latter two events
     74 * can be caused by driver unloading/loading or the device being physically
    3875 * unplugged (e.g. a USB network device).
    3976 *
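
The new locking section boils down to a small protocol between the internal network and the filter: switch the trunk state to disconnecting, wait for the busy count to drain, then disconnect. A hedged sketch of the caller side (only pfnSetState's shape is visible in this diff; the pfnWaitForIdle and pfnDisconnectAndRelease parameter lists and the helper name are assumptions):

    /* Hypothetical internal-network-side helper illustrating the shutdown
       sequence described in the locking section above. */
    static void intnetShutdownTrunkSketch(PINTNETTRUNKIFPORT pIfPort)
    {
        /* 1. Tell the filter that no further calls into the network may be made. */
        pIfPort->pfnSetState(pIfPort, INTNETTRUNKIFSTATE_DISCONNECTING);

        /* 2. Wait for still-active calls (tracked by the busy counter) to complete. */
        pIfPort->pfnWaitForIdle(pIfPort, RT_INDEFINITE_WAIT /* cMillies, assumed */);

        /* 3. Only now is it safe to drop the trunk and destroy the network. */
        pIfPort->pfnDisconnectAndRelease(pIfPort);
    }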
     
    4986 *
    5087 *
    51  * @subsection subsec_netflt_msc_dis_rel    Disconnect from the network and release
     88 * @subsection subsec_netflt_msc_dis_rel    Disconnect from the network and release - OBSOLETE
    5289 *
    5390 * @msc
     
    114151 *
    115152 *
    116  * @subsection subsec_netflt_msc_hif_rm    Host Interface Removal
     153 * @subsection subsec_netflt_msc_hif_rm    Host Interface Removal - OBSOLETE
    117154 *
    118155 * The ifnet_t (pIf) is a tricky customer as any reference to it can potentially
     
    154191 *
    155192 *
    156  * @subsection subsec_netflt_msc_hif_rm    Host Interface Rediscovery
     193 * @subsection subsec_netflt_msc_hif_rm    Host Interface Rediscovery - OBSOLETE
    157194 *
    158195 * The rediscovery is performed when we receive a send request and a certain
     
    338375static bool vboxNetFltMaybeRediscovered(PVBOXNETFLTINS pThis)
    339376{
    340     RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    341     uint64_t Now = RTTimeNanoTS();
    342     bool fRediscovered;
    343     bool fDoIt;
     377    RTSPINLOCKTMP   Tmp = RTSPINLOCKTMP_INITIALIZER;
     378    uint64_t        Now;
     379    bool            fRediscovered;
     380    bool            fDoIt;
     381
     382    /*
     383     * Don't do rediscovery if we're called with preemption disabled.
     384     *
      385     * Note! This may cause trouble if we're always called with preemption
      386     *       disabled and vboxNetFltOsMaybeRediscovered actually does some real
      387     *       work.  For the time being though, only Darwin and FreeBSD depend
     388     *       on these call outs and neither supports sending with preemption
     389     *       disabled.
     390     */
     391    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     392        return false;
    344393
    345394    /*
    346395     * Rediscovered already? Time to try again?
    347396     */
    348     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     397    Now = RTTimeNanoTS();
     398    RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
    349399
    350400    fRediscovered = !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
     
    355405        ASMAtomicWriteBool(&pThis->fRediscoveryPending, true);
    356406
    357     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     407    RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
    358408
    359409    /*
     
    372422
    373423        if (fRediscovered)
    374             vboxNetFltPortOsSetActive(pThis, pThis->fActive);
     424            /** @todo this isn't 100% serialized. */
     425            vboxNetFltPortOsSetActive(pThis, pThis->enmTrunkState == INTNETTRUNKIFSTATE_ACTIVE);
    375426    }
    376427
     
    394445    Assert(pThis->MyPort.u32Version == INTNETTRUNKIFPORT_VERSION);
    395446    AssertReturn(vboxNetFltGetState(pThis) == kVBoxNetFltInsState_Connected, VERR_INVALID_STATE);
    396     Assert(pThis->fActive);
    397447
    398448    /*
     
    400450     * before invoking the OS specific code.
    401451     */
    402     vboxNetFltRetain(pThis, true /* fBusy */);
    403     if (    !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost)
    404         ||  vboxNetFltMaybeRediscovered(pThis))
    405         rc = vboxNetFltPortOsXmit(pThis, pSG, fDst);
    406     vboxNetFltRelease(pThis, true /* fBusy */);
     452    if (RT_LIKELY(vboxNetFltTryRetainBusyActive(pThis)))
     453    {
     454        if (    !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost)
     455            ||  vboxNetFltMaybeRediscovered(pThis))
     456            rc = vboxNetFltPortOsXmit(pThis, pSG, fDst);
     457        vboxNetFltRelease(pThis, true /* fBusy */);
     458    }
    407459
    408460    return rc;
     
    424476    Assert(pThis->MyPort.u32Version == INTNETTRUNKIFPORT_VERSION);
    425477    AssertReturn(vboxNetFltGetState(pThis) == kVBoxNetFltInsState_Connected, VERR_INVALID_STATE);
    426     AssertReturn(!pThis->fActive, VERR_INVALID_STATE);
     478    AssertReturn(pThis->enmTrunkState == INTNETTRUNKIFSTATE_DISCONNECTING, VERR_INVALID_STATE);
    427479
    428480    /*
     
    442494
    443495/**
    444  * @copydoc INTNETTRUNKIFPORT::pfnSetActive
    445  */
    446 static DECLCALLBACK(bool) vboxNetFltPortSetActive(PINTNETTRUNKIFPORT pIfPort, bool fActive)
    447 {
    448     PVBOXNETFLTINS pThis = IFPORT_2_VBOXNETFLTINS(pIfPort);
     496 * @copydoc INTNETTRUNKIFPORT::pfnSetState
     497 */
     498static DECLCALLBACK(INTNETTRUNKIFSTATE) vboxNetFltPortSetState(PINTNETTRUNKIFPORT pIfPort, INTNETTRUNKIFSTATE enmState)
     499{
     500    PVBOXNETFLTINS      pThis = IFPORT_2_VBOXNETFLTINS(pIfPort);
     501    RTSPINLOCKTMP       Tmp   = RTSPINLOCKTMP_INITIALIZER;
     502    INTNETTRUNKIFSTATE  enmOldTrunkState;
    449503
    450504    /*
     
    454508    AssertPtr(pThis->pGlobals);
    455509    Assert(pThis->MyPort.u32Version == INTNETTRUNKIFPORT_VERSION);
    456     AssertReturn(vboxNetFltGetState(pThis) == kVBoxNetFltInsState_Connected, false);
    457 
    458     /*
    459      * We're assuming that the caller is serializing the calls, so we don't
    460      * have to be extremely careful here. Just update first and then call
    461      * the OS specific code, the update must be serialized for various reasons.
    462      */
    463     if (ASMAtomicReadBool(&pThis->fActive) != fActive)
    464     {
    465         RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    466         RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    467         ASMAtomicWriteBool(&pThis->fActive, fActive);
    468         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    469 
    470         vboxNetFltPortOsSetActive(pThis, fActive);
    471     }
    472     else
    473         fActive = !fActive;
    474     return !fActive;
     510    AssertReturn(vboxNetFltGetState(pThis) == kVBoxNetFltInsState_Connected, INTNETTRUNKIFSTATE_INVALID);
     511    AssertReturn(enmState > INTNETTRUNKIFSTATE_INVALID && enmState < INTNETTRUNKIFSTATE_END,
     512                 INTNETTRUNKIFSTATE_INVALID);
     513
     514    /*
     515     * Take the lock and change the state.
     516     */
     517    RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
     518    enmOldTrunkState = pThis->enmTrunkState;
     519    if (enmOldTrunkState != enmState)
     520        ASMAtomicWriteU32((uint32_t volatile *)&pThis->enmTrunkState, enmState);
     521    RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
     522
     523    /*
     524     * If the state change indicates that the trunk has become active or
      525     * inactive, call the OS specific part so it can work the promiscuous
     526     * settings and such.
     527     * Note! The caller makes sure there are no concurrent pfnSetState calls.
     528     */
     529    if ((enmOldTrunkState == INTNETTRUNKIFSTATE_ACTIVE) != (enmState == INTNETTRUNKIFSTATE_ACTIVE))
     530        vboxNetFltPortOsSetActive(pThis, (enmState == INTNETTRUNKIFSTATE_ACTIVE));
     531
     532    return enmOldTrunkState;
    475533}
    476534
     
    496554
    497555    Assert(vboxNetFltGetState(pThis) == kVBoxNetFltInsState_Connected);
    498     Assert(!pThis->fActive);
     556    Assert(pThis->enmTrunkState == INTNETTRUNKIFSTATE_DISCONNECTING);
    499557    Assert(!pThis->fRediscoveryPending);
    500558    Assert(!pThis->cBusy);
     
    503561     * Disconnect and release it.
    504562     */
    505     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     563    RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
    506564    vboxNetFltSetState(pThis, kVBoxNetFltInsState_Disconnecting);
    507     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     565    RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
    508566
    509567    vboxNetFltOsDisconnectIt(pThis);
     
    511569
    512570#ifdef VBOXNETFLT_STATIC_CONFIG
    513     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     571    RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
    514572    vboxNetFltSetState(pThis, kVBoxNetFltInsState_Unconnected);
    515     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     573    RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
    516574#endif
    517575
     
    543601    Assert(vboxNetFltGetState(pThis) == kVBoxNetFltInsState_Disconnecting);
    544602#endif
    545     Assert(!pThis->fActive);
     603    Assert(pThis->enmTrunkState == INTNETTRUNKIFSTATE_DISCONNECTING);
    546604    Assert(!pThis->fRediscoveryPending);
    547605    Assert(!pThis->cRefs);
     
    684742
    685743/**
     744 * Tries to retain the device as busy if the trunk is active.
     745 *
     746 * This is used before calling pfnRecv or pfnPreRecv.
     747 *
     748 * @returns true if we succeeded in retaining a busy reference to the active
     749 *          device.  false if we failed.
     750 * @param   pThis           The instance.
     751 */
     752DECLHIDDEN(bool) vboxNetFltTryRetainBusyActive(PVBOXNETFLTINS pThis)
     753{
     754    RTSPINLOCKTMP   Tmp = RTSPINLOCKTMP_INITIALIZER;
     755    uint32_t        cRefs;
     756    bool            fRc;
     757
     758    /*
     759     * Paranoid Android.
     760     */
     761    AssertPtr(pThis);
     762    Assert(pThis->MyPort.u32Version == INTNETTRUNKIFPORT_VERSION);
     763    Assert(pThis->MyPort.u32VersionEnd == INTNETTRUNKIFPORT_VERSION);
     764    Assert(   vboxNetFltGetState(pThis) > kVBoxNetFltInsState_Invalid
     765           && vboxNetFltGetState(pThis) < kVBoxNetFltInsState_Destroyed);
     766    AssertPtr(pThis->pGlobals);
     767    Assert(pThis->hEventIdle != NIL_RTSEMEVENT);
     768    Assert(pThis->hSpinlock != NIL_RTSPINLOCK);
     769    Assert(pThis->szName[0]);
     770
     771    /*
     772     * Do the retaining and checking behind the spinlock.
     773     */
     774    RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
     775    fRc = pThis->enmTrunkState == INTNETTRUNKIFSTATE_ACTIVE;
     776    if (fRc)
     777    {
     778        cRefs = ASMAtomicIncU32(&pThis->cRefs);
     779        AssertMsg(cRefs > 1 && cRefs < UINT32_MAX / 2, ("%d\n", cRefs)); NOREF(cRefs);
     780
     781        cRefs = ASMAtomicIncU32(&pThis->cBusy);
     782        AssertMsg(cRefs >= 1 && cRefs < UINT32_MAX / 2, ("%d\n", cRefs)); NOREF(cRefs);
     783    }
     784    RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
     785
     786    return fRc;
     787}
     788
     789
     790/**
     791 * Tries to retain the device as busy if the trunk is not disconnecting.
     792 *
     793 * This is used before reporting stuff to the internal network.
     794 *
      795 * @returns true if we succeeded in retaining a busy reference to the
      796 *          device (i.e. it is not disconnecting).  false if we failed.
     797 * @param   pThis           The instance.
     798 */
     799DECLHIDDEN(bool) vboxNetFltTryRetainBusyNotDisconnected(PVBOXNETFLTINS pThis)
     800{
     801    RTSPINLOCKTMP   Tmp = RTSPINLOCKTMP_INITIALIZER;
     802    uint32_t        cRefs;
     803    bool            fRc;
     804
     805    /*
     806     * Paranoid Android.
     807     */
     808    AssertPtr(pThis);
     809    Assert(pThis->MyPort.u32Version == INTNETTRUNKIFPORT_VERSION);
     810    Assert(pThis->MyPort.u32VersionEnd == INTNETTRUNKIFPORT_VERSION);
     811    Assert(   vboxNetFltGetState(pThis) > kVBoxNetFltInsState_Invalid
     812           && vboxNetFltGetState(pThis) < kVBoxNetFltInsState_Destroyed);
     813    AssertPtr(pThis->pGlobals);
     814    Assert(pThis->hEventIdle != NIL_RTSEMEVENT);
     815    Assert(pThis->hSpinlock != NIL_RTSPINLOCK);
     816    Assert(pThis->szName[0]);
     817
     818    /*
     819     * Do the retaining and checking behind the spinlock.
     820     */
     821    RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
     822    fRc =  pThis->enmTrunkState == INTNETTRUNKIFSTATE_ACTIVE
     823        || pThis->enmTrunkState == INTNETTRUNKIFSTATE_INACTIVE;
     824    if (fRc)
     825    {
     826        cRefs = ASMAtomicIncU32(&pThis->cRefs);
     827        AssertMsg(cRefs > 1 && cRefs < UINT32_MAX / 2, ("%d\n", cRefs)); NOREF(cRefs);
     828
     829        cRefs = ASMAtomicIncU32(&pThis->cBusy);
     830        AssertMsg(cRefs >= 1 && cRefs < UINT32_MAX / 2, ("%d\n", cRefs)); NOREF(cRefs);
     831    }
     832    RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
     833
     834    return fRc;
     835}
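
The two retain helpers above are meant to bracket any call out of the OS-specific code, mirroring how the pfnXmit hunk earlier pairs vboxNetFltTryRetainBusyActive with vboxNetFltRelease. A hedged sketch of a receive-side caller (the function name is hypothetical, and the hand-off to the switch port is left as a comment rather than guessing pfnRecv's exact parameters):

    /* Hypothetical OS-specific receive path: only touch the internal network
       while holding a busy reference and while the trunk is active. */
    static void vboxNetFltOsSketchRecv(PVBOXNETFLTINS pThis, PINTNETSG pSG)
    {
        NOREF(pSG);
        if (vboxNetFltTryRetainBusyActive(pThis))
        {
            /* ... hand pSG to pThis->pSwitchPort->pfnRecv() here ... */
            vboxNetFltRelease(pThis, true /* fBusy */);
        }
        /* else: trunk inactive or disconnecting - let the frame pass by untouched. */
    }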
     836
     837
     838/**
    686839 * @copydoc INTNETTRUNKIFPORT::pfnRetain
    687840 */
     
    713866     * Validate state.
    714867     */
    715     Assert(!pThis->fActive);
     868    Assert(pThis->enmTrunkState == INTNETTRUNKIFSTATE_INACTIVE);
    716869    Assert(!pThis->fRediscoveryPending);
    717870    Assert(!pThis->cBusy);
     
    736889        pThis->pSwitchPort = NULL;
    737890
    738     Assert(!pThis->fActive);
     891    Assert(pThis->enmTrunkState == INTNETTRUNKIFSTATE_INACTIVE);
    739892    return rc;
    740893}
     
    773926    pNew->MyPort.pfnRelease             = vboxNetFltPortRelease;
    774927    pNew->MyPort.pfnDisconnectAndRelease= vboxNetFltPortDisconnectAndRelease;
    775     pNew->MyPort.pfnSetActive           = vboxNetFltPortSetActive;
     928    pNew->MyPort.pfnSetState            = vboxNetFltPortSetState;
    776929    pNew->MyPort.pfnWaitForIdle         = vboxNetFltPortWaitForIdle;
    777930    pNew->MyPort.pfnXmit                = vboxNetFltPortXmit;
     
    781934    pNew->hSpinlock                     = NIL_RTSPINLOCK;
    782935    pNew->enmState                      = kVBoxNetFltInsState_Initializing;
    783     pNew->fActive                       = false;
     936    pNew->enmTrunkState                 = INTNETTRUNKIFSTATE_INACTIVE;
    784937    pNew->fDisconnectedFromHost         = false;
    785938    pNew->fRediscoveryPending           = false;