VirtualBox

Changeset 48044 in vbox for trunk/src/VBox/VMM/VMMR0


Ignore:
Timestamp:
Aug 25, 2013 7:21:27 PM (11 years ago)
Author:
vboxsync
Message:

VMM/HM: Thread-context hooks, disable preemption while touching host state on VM-exits. AMD-V still todo.

Location:
trunk/src/VBox/VMM/VMMR0
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r48037 r48044  
    14921492 * @returns VBox status code.
    14931493 * @param   pVCpu       Pointer to the VMCPU.
     1494 *
     1495 * @remarks No-long-jump zone!!!
    14941496 */
    14951497VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPU pVCpu)
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r48037 r48044  
    19471947DECLINLINE(void) hmR0SvmLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    19481948{
     1949    HM_DISABLE_PREEMPT_IF_NEEDED();
    19491950    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    1950 
    1951     /* Avoid repeating this work when thread-context hooks are used and we had been preempted before
    1952        which would've done this work from the VMXR0ThreadCtxCallback(). */
    1953     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    1954     bool fPreemptDisabled = false;
    1955     if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    1956     {
    1957         Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
    1958         RTThreadPreemptDisable(&PreemptState);
    1959         fPreemptDisabled = true;
    1960     }
    1961 
     1951    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1952
     1953    /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
     1954       and done this from the VMXR0ThreadCtxCallback(). */
    19621955    if (!pVCpu->hm.s.fLeaveDone)
    19631956    {
     
    19761969    AssertRC(rc); NOREF(rc);
    19771970
    1978     /* Restore preemption if we previously disabled it ourselves. */
    1979     if (fPreemptDisabled)
    1980         RTThreadPreemptRestore(&PreemptState);
     1971    HM_RESTORE_PREEMPT_IF_NEEDED();
    19811972}
    19821973
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r48041 r48044  
    60896089static void hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    60906090{
     6091    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    60916092    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    60926093    Assert(VMMR0IsLogFlushDisabled(pVCpu));
     
    61606161DECLINLINE(void) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    61616162{
     6163    HM_DISABLE_PREEMPT_IF_NEEDED();
    61626164    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    6163 
    6164     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    6165     bool fPreemptDisabled = false;
    6166     if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    6167     {
    6168         Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
    6169         RTThreadPreemptDisable(&PreemptState);
    6170         fPreemptDisabled = true;
    6171     }
    6172 
    6173     /* Avoid repeating this work when thread-context hooks are used and we had been preempted before
    6174        which would've done this work from the VMXR0ThreadCtxCallback(). */
     6165    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     6166
     6167    /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
     6168       and done this from the VMXR0ThreadCtxCallback(). */
    61756169    if (!pVCpu->hm.s.fLeaveDone)
    61766170    {
     
    61896183    AssertRC(rc); NOREF(rc);
    61906184
    6191     /* Restore preemption if we previously disabled it ourselves. */
    6192     if (fPreemptDisabled)
    6193         RTThreadPreemptRestore(&PreemptState);
     6185    HM_RESTORE_PREEMPT_IF_NEEDED();
    61946186}
    61956187
     
    96609652        {
    96619653            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
     9654
     9655            /* We're playing with the host CPU state here, make sure we don't preempt. */
     9656            HM_DISABLE_PREEMPT_IF_NEEDED();
    96629657            bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
    96639658
     
    96779672                     && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
    96789673                rcStrict = rcStrict2;
     9674
     9675            HM_RESTORE_PREEMPT_IF_NEEDED();
    96799676        }
    96809677    }
     
    98829879        }
    98839880
     9881        /* We're playing with the host CPU state here, make sure we can't preempt. */
     9882        HM_DISABLE_PREEMPT_IF_NEEDED();
     9883
    98849884        /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
    98859885        PVM pVM = pVCpu->CTX_SUFF(pVM);
    98869886        CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
    98879887        Assert(CPUMIsGuestDebugStateActive(pVCpu));
     9888
     9889        HM_RESTORE_PREEMPT_IF_NEEDED();
    98889890
    98899891#ifdef VBOX_WITH_STATISTICS
     
    1014810150         * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
    1014910151         */
     10152        HM_DISABLE_PREEMPT_IF_NEEDED();
     10153
    1015010154        pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
    1015110155        pMixedCtx->dr[6] |= uDR6;
    1015210156        if (CPUMIsGuestDebugStateActive(pVCpu))
    1015310157            ASMSetDR6(pMixedCtx->dr[6]);
     10158
     10159        HM_RESTORE_PREEMPT_IF_NEEDED();
    1015410160
    1015510161        rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
     
    1020610212    AssertRCReturn(rc, rc);
    1020710213
     10214    /* We're playing with the host CPU state here, have to disable preemption. */
     10215    HM_DISABLE_PREEMPT_IF_NEEDED();
     10216
    1020810217    /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
    1020910218    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    1021210221    {
    1021310222        Assert(CPUMIsGuestFPUStateActive(pVCpu));
     10223        HM_RESTORE_PREEMPT_IF_NEEDED();
     10224
    1021410225        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    1021510226        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    1021610227        return VINF_SUCCESS;
    1021710228    }
     10229
     10230    HM_RESTORE_PREEMPT_IF_NEEDED();
    1021810231
    1021910232    /* Forward #NM to the guest. */
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r48038 r48044  
    901901        {
    902902#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     903            Assert(!VMMR0ThreadCtxHooksAreRegistered(pVCpu));
    903904            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    904905            RTThreadPreemptDisable(&PreemptState);
     
    940941                /* Enter HM context. */
    941942                rc = HMR0Enter(pVM, pVCpu);
    942 
    943                 /* When preemption hooks are in place, enable preemption now that we're in HM context. */
    944                 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
    945                 {
    946                     fPreemptRestored = true;
    947                     RTThreadPreemptRestore(&PreemptState);
    948                 }
    949 
    950943                if (RT_SUCCESS(rc))
    951944                {
    952945                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    953946
     947                    /* When preemption hooks are in place, enable preemption now that we're in HM context. */
     948                    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     949                    {
     950                        fPreemptRestored = true;
     951                        RTThreadPreemptRestore(&PreemptState);
     952                    }
     953
    954954                    /* Setup the longjmp machinery and execute guest code. */
    955955                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
    956956
    957                     /* Leave HM context. This deregisters thread-context hooks if any. */
     957                    /* Leave HM context. */
    958958                    int rc2 = HMR0Leave(pVM, pVCpu);
    959959                    AssertRC(rc2);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette