VirtualBox

Changeset 87487 in vbox


Timestamp: Jan 29, 2021 6:06:39 PM
Author: vboxsync
Message: VMM/HMVMX: Moving more stuff to HMR0PERVCPU. bugref:9217
Location: trunk/src/VBox/VMM
Files: 6 edited
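
The pattern throughout the changeset is uniform: per-VCPU HM fields that only ring-0 code touches move out of the HMCPU block shared with ring-3 (accessed as pVCpu->hm.s.*) into the ring-0-only HMR0PERVCPU block (pVCpu->hmr0.s.*). A minimal sketch of that access-path change, using simplified stand-in structures rather than the real VirtualBox headers:

    /* Minimal sketch with simplified stand-ins (not the real VirtualBox headers):
     * the shared per-VCPU HM state vs. the ring-0-only per-VCPU HM state. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct HMCPUSKETCH          /* shared with ring-3: pVCpu->hm.s   */
    {
        bool fActive;
        bool fUseDebugLoop;
    } HMCPUSKETCH;

    typedef struct HMR0PERVCPUSKETCH    /* ring-0 only: pVCpu->hmr0.s        */
    {
        bool fForceTLBFlush;            /* one of the fields moved in r87487 */
        bool fLeaveDone;
        bool fLoadSaveGuestXcr0;
    } HMR0PERVCPUSKETCH;

    typedef struct VMCPUSKETCH
    {
        struct { HMCPUSKETCH       s; } hm;
        struct { HMR0PERVCPUSKETCH s; } hmr0;
    } VMCPUSKETCH;

    int main(void)
    {
        VMCPUSKETCH VCpu = { 0 };

        /* Before r87487:  VCpu.hm.s.fForceTLBFlush   (visible to ring-3)      */
        /* After  r87487:  VCpu.hmr0.s.fForceTLBFlush (private to ring-0 code) */
        VCpu.hmr0.s.fForceTLBFlush = true;

        printf("fForceTLBFlush=%d\n", VCpu.hmr0.s.fForceTLBFlush);
        return 0;
    }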

  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r87480 → r87487

    @@ -485 +485 @@
     }

    +
     /**
      * Poke an EMT so it can perform the appropriate TLB shootdowns.
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

    r87469 → r87487

    @@ -786 +786 @@
             ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
             ;
    -        cmp     byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], %2
    +        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
             mov     eax, VERR_VMX_STARTVM_PRECOND_0
             jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

    @@ -1253 +1253 @@
             ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
             ;
    -        cmp     byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], %2
    +        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
             mov     eax, VERR_SVM_VMRUN_PRECOND_0
             jne     .failure_return
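
The two HMR0A.asm hunks only adjust where the world-switch templates read the XCR0 flag from: each generated template assumes a fixed fLoadSaveGuestXcr0 value (the %2 macro parameter), and the precondition check now compares against GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0 instead of VMCPU.hm + HMCPU.fLoadSaveGuestXcr0. A hedged C rendering of that check; the structure and the error value are simplified stand-ins, not the real VirtualBox definitions:

    /* Hedged C rendering of the assembly precondition check above; the type and
     * the error value are simplified stand-ins, not the real VirtualBox ones. */
    #include <stdbool.h>

    #define SKETCH_VERR_STARTVM_PRECOND_0  (-1)  /* stand-in for VERR_VMX_STARTVM_PRECOND_0 / VERR_SVM_VMRUN_PRECOND_0 */

    typedef struct GVMCPUSKETCH
    {
        struct { struct { bool fLoadSaveGuestXcr0; } s; } hmr0;   /* GVMCPU.hmr0 in the asm */
    } GVMCPUSKETCH;

    /* Each generated start-VM/VMRUN template assumes a fixed fLoadSaveGuestXcr0
     * value (%2 in the macro); a mismatch means the C side forgot to re-select
     * the template after the state changed. */
    static int startVmPrecondCheck(const GVMCPUSKETCH *pGVCpu, bool fTemplateLoadSaveXcr0)
    {
        if (pGVCpu->hmr0.s.fLoadSaveGuestXcr0 != fTemplateLoadSaveXcr0)
            return SKETCH_VERR_STARTVM_PRECOND_0;   /* the 'jne ...precond_failure_return' path */
        return 0;                                   /* preconditions hold, continue */
    }

    int main(void)
    {
        GVMCPUSKETCH GVCpu = { { { true } } };
        return startVmPrecondCheck(&GVCpu, true) == 0 ? 0 : 1;
    }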
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r87480 → r87487

    @@ -731 +731 @@
             { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit },
         };
    -    uintptr_t const idx = (pVCpu->hm.s.fLoadSaveGuestXcr0                             ? 1 : 0)
    +    uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0                           ? 1 : 0)
                             | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_ENTRY ? 2 : 0)
                             | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_EXIT  ? 4 : 0);

    @@ -1291 +1291 @@
         {
             STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
    -        pVCpu->hm.s.fForceTLBFlush = true;
    +        pVCpu->hmr0.s.fForceTLBFlush = true;
             fNewAsid = true;
         }

    @@ -1301 +1301 @@
         if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
         {
    -        pVCpu->hm.s.fForceTLBFlush = true;
    +        pVCpu->hmr0.s.fForceTLBFlush = true;
             STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
         }

    @@ -1324 +1324 @@
         {
             pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
    -        if (pVCpu->hm.s.fForceTLBFlush)
    +        if (pVCpu->hmr0.s.fForceTLBFlush)
             {
                 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */

    @@ -1360 +1360 @@
                 }

    -            pVCpu->hm.s.fForceTLBFlush = false;
    +            pVCpu->hmr0.s.fForceTLBFlush = false;
             }
         }

    @@ -1669 +1669 @@
         /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
         bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
    -    if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
    -    {
    -        pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    +    if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
    +    {
    +        pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
             hmR0SvmUpdateVmRunFunction(pVCpu);
         }

    @@ -1918 +1918 @@
         if (fStepping)
         {
    -        pVCpu->hm.s.fClearTrapFlag = true;
    +        pVCpu->hmr0.s.fClearTrapFlag = true;
             pVmcb->guest.u64RFlags |= X86_EFL_TF;
             fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */

    @@ -1952 +1952 @@
              *        with the same values. */
             fInterceptMovDRx = true;
    -        pVCpu->hm.s.fUsingHyperDR7 = true;
    +        pVCpu->hmr0.s.fUsingHyperDR7 = true;
             Log5(("hmR0SvmExportSharedDebugState: Loaded hyper DRx\n"));
         }

    @@ -1967 +1967 @@
                 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
             }
    -        pVCpu->hm.s.fUsingHyperDR7 = false;
    +        pVCpu->hmr0.s.fUsingHyperDR7 = false;

             /*

    @@ -2280 +2280 @@
                                        == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));

    -    pVCpu->hm.s.fLeaveDone = false;
    +    pVCpu->hmr0.s.fLeaveDone = false;
         return VINF_SUCCESS;
     }

    @@ -2308 +2308 @@
                 VMMRZCallRing3Disable(pVCpu);

    -            if (!pVCpu->hm.s.fLeaveDone)
    +            if (!pVCpu->hmr0.s.fLeaveDone)
                 {
                     hmR0SvmLeave(pVCpu, false /* fImportState */);
    -                pVCpu->hm.s.fLeaveDone = true;
    +                pVCpu->hmr0.s.fLeaveDone = true;
                 }

    @@ -2342 +2342 @@
                                                == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));

    -            pVCpu->hm.s.fLeaveDone = false;
    +            pVCpu->hmr0.s.fLeaveDone = false;

                 /* Restore longjmp state. */

    @@ -2818 +2818 @@
                 if (fWhat & CPUMCTX_EXTRN_DR6)
                 {
    -                if (!pVCpu->hm.s.fUsingHyperDR7)
    +                if (!pVCpu->hmr0.s.fUsingHyperDR7)
                         pCtx->dr[6] = pVmcbGuest->u64DR6;
                     else

    @@ -2826 +2826 @@
                 if (fWhat & CPUMCTX_EXTRN_DR7)
                 {
    -                if (!pVCpu->hm.s.fUsingHyperDR7)
    +                if (!pVCpu->hmr0.s.fUsingHyperDR7)
                         pCtx->dr[7] = pVmcbGuest->u64DR7;
                     else

    @@ -2994 +2994 @@
         /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
            and done this from the SVMR0ThreadCtxCallback(). */
    -    if (!pVCpu->hm.s.fLeaveDone)
    +    if (!pVCpu->hmr0.s.fLeaveDone)
         {
             hmR0SvmLeave(pVCpu, true /* fImportState */);
    -        pVCpu->hm.s.fLeaveDone = true;
    +        pVCpu->hmr0.s.fLeaveDone = true;
         }

    @@ -4620 +4620 @@
          * Clear the X86_EFL_TF if necessary.
          */
    -    if (pVCpu->hm.s.fClearTrapFlag)
    -    {
    -        pVCpu->hm.s.fClearTrapFlag = false;
    +    if (pVCpu->hmr0.s.fClearTrapFlag)
    +    {
    +        pVCpu->hmr0.s.fClearTrapFlag = false;
             pCtx->eflags.Bits.u1TF = 0;
         }

    @@ -6553 +6553 @@
             bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
             Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], fLoadSaveGuestXcr0, pCtx->cr4));
    -        if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
    -        {
    -            pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    +        if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
    +        {
    +            pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
                 hmR0SvmUpdateVmRunFunction(pVCpu);
             }
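
Besides the TLB-flush, leave and debug-state paths, HMSVMR0.cpp reads the relocated fLoadSaveGuestXcr0 when picking a VMRUN worker from its function table (first hunk above). A hedged sketch of that index computation; the CPUMCTX_WSF_* bit values here are illustrative stand-ins, not the real constants:

    /* Hedged sketch of the VMRUN function-table index selection shown in the
     * first hunk above; the world-switcher bit values are illustrative only. */
    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_WSF_IBPB_ENTRY   UINT32_C(0x01)
    #define SKETCH_WSF_IBPB_EXIT    UINT32_C(0x02)

    /* Bit 0 selects the XCR0 save/restore variant, bits 1-2 the IBPB-on-entry
     * and IBPB-on-exit variants; the result indexes an array of VMRUN workers. */
    static uintptr_t svmVmRunFunctionIndex(bool fLoadSaveGuestXcr0, uint32_t fWorldSwitcher)
    {
        return (fLoadSaveGuestXcr0                      ? 1 : 0)
             | (fWorldSwitcher & SKETCH_WSF_IBPB_ENTRY  ? 2 : 0)
             | (fWorldSwitcher & SKETCH_WSF_IBPB_EXIT   ? 4 : 0);
    }

    int main(void)
    {
        /* e.g. XCR0 handling needed, IBPB on entry only -> index 3 */
        return (int)svmVmRunFunctionIndex(true, SKETCH_WSF_IBPB_ENTRY);
    }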
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r87480 → r87487

    @@ -2988 +2988 @@
         pVCpu->hmr0.s.idLastCpu     = pHostCpu->idCpu;
         pVCpu->hmr0.s.cTlbFlushes   = pHostCpu->cTlbFlushes;
    -    pVCpu->hm.s.fForceTLBFlush  = false;
    +    pVCpu->hmr0.s.fForceTLBFlush  = false;
         return;
     }

    @@ -3089 +3089 @@


    -    pVCpu->hm.s.fForceTLBFlush = false;
    +    pVCpu->hmr0.s.fForceTLBFlush = false;
         HMVMX_UPDATE_FLUSH_SKIPPED_STAT();

    @@ -3134 +3134 @@
             || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
         {
    -        pVCpu->hm.s.fForceTLBFlush = true;
    +        pVCpu->hmr0.s.fForceTLBFlush = true;
             STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
         }

    @@ -3141 +3141 @@
         if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
         {
    -        pVCpu->hm.s.fForceTLBFlush = true;
    +        pVCpu->hmr0.s.fForceTLBFlush = true;
             STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
         }

    @@ -3148 +3148 @@
         if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
         {
    -        pVCpu->hm.s.fForceTLBFlush = true;
    +        pVCpu->hmr0.s.fForceTLBFlush = true;
             pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
             STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);

    @@ -3156 +3156 @@
         pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;

    -    if (pVCpu->hm.s.fForceTLBFlush)
    +    if (pVCpu->hmr0.s.fForceTLBFlush)
         {
             hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);
    -        pVCpu->hm.s.fForceTLBFlush = false;
    +        pVCpu->hmr0.s.fForceTLBFlush = false;
         }
     }

    @@ -3189 +3189 @@
             || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
         {
    -        pVCpu->hm.s.fForceTLBFlush = true;
    +        pVCpu->hmr0.s.fForceTLBFlush = true;
             STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
         }

    @@ -3202 +3202 @@
              * include fExplicitFlush's too) - an obscure corner case.
              */
    -        pVCpu->hm.s.fForceTLBFlush = true;
    +        pVCpu->hmr0.s.fForceTLBFlush = true;
             STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
         }

    @@ -3209 +3209 @@
         if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
         {
    -        pVCpu->hm.s.fForceTLBFlush = true;
    +        pVCpu->hmr0.s.fForceTLBFlush = true;
             pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
             STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);

    @@ -3216 +3216 @@
         PVMCC pVM = pVCpu->CTX_SUFF(pVM);
         pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
    -    if (pVCpu->hm.s.fForceTLBFlush)
    +    if (pVCpu->hmr0.s.fForceTLBFlush)
         {
             ++pHostCpu->uCurrentAsid;

    @@ -3226 +3226 @@
             }

    -        pVCpu->hm.s.fForceTLBFlush = false;
    +        pVCpu->hmr0.s.fForceTLBFlush = false;
             pVCpu->hmr0.s.cTlbFlushes    = pHostCpu->cTlbFlushes;
             pVCpu->hmr0.s.uCurrentAsid   = pHostCpu->uCurrentAsid;

    @@ -4206 +4206 @@
             { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
         };
    -    uintptr_t const idx = (pVCpu->hm.s.fLoadSaveGuestXcr0                             ?  1 : 0)
    +    uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0                           ?  1 : 0)
                             | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_ENTRY ?  2 : 0)
                             | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_L1D_ENTRY  ?  4 : 0)

    @@ -6025 +6025 @@
             /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
             bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
    -        if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
    -        {
    -            pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    +        if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
    +        {
    +            pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
                 hmR0VmxUpdateStartVmFunction(pVCpu);
             }

    @@ -6096 +6096 @@
                 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
                 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
    -            pVCpu->hm.s.fClearTrapFlag = true;
    +            pVCpu->hmr0.s.fClearTrapFlag = true;
                 fSteppingDB = true;
             }

    @@ -6121 +6121 @@
             /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
             u64GuestDr7 = CPUMGetHyperDR7(pVCpu);
    -        pVCpu->hm.s.fUsingHyperDR7 = true;
    +        pVCpu->hmr0.s.fUsingHyperDR7 = true;
             fInterceptMovDRx = true;
         }

    @@ -6155 +6155 @@
             /* Update DR7 with the actual guest value. */
             u64GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
    -        pVCpu->hm.s.fUsingHyperDR7 = false;
    +        pVCpu->hmr0.s.fUsingHyperDR7 = false;
         }

    @@ -7138 +7138 @@

         if (   fOffsettedTsc
    -        && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
    +        && RT_LIKELY(!pVCpu->hmr0.s.fDebugWantRdTscExit))
         {
             if (pVmxTransient->fIsNestedGuest)

    @@ -7722 +7722 @@
                 if (fWhat & CPUMCTX_EXTRN_DR7)
                 {
    -                if (!pVCpu->hm.s.fUsingHyperDR7)
    +                if (!pVCpu->hmr0.s.fUsingHyperDR7)
                         rc = VMXReadVmcsNw(VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);   AssertRC(rc);
                 }

    @@ -8432 +8432 @@
         /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
            and done this from the VMXR0ThreadCtxCallback(). */
    -    if (!pVCpu->hm.s.fLeaveDone)
    +    if (!pVCpu->hmr0.s.fLeaveDone)
         {
             int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */);
             AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
    -        pVCpu->hm.s.fLeaveDone = true;
    +        pVCpu->hmr0.s.fLeaveDone = true;
         }
         Assert(!pVCpu->cpum.GstCtx.fExtrn);

    @@ -8584 +8584 @@
             CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);

    -    Assert(!pVCpu->hm.s.fClearTrapFlag);
    +    Assert(!pVCpu->hmr0.s.fClearTrapFlag);

         /* Update the exit-to-ring 3 reason. */

    @@ -9285 +9285 @@
         {
             pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = fInNestedGuestMode;
    -        pVCpu->hm.s.fLeaveDone = false;
    +        pVCpu->hmr0.s.fLeaveDone = false;
             Log4Func(("Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));

    @@ -9326 +9326 @@

                 /* Restore host-state (FPU, debug etc.) */
    -            if (!pVCpu->hm.s.fLeaveDone)
    +            if (!pVCpu->hmr0.s.fLeaveDone)
                 {
                     /*

    @@ -9333 +9333 @@
                      */
                     hmR0VmxLeave(pVCpu, false /* fImportState */);
    -                pVCpu->hm.s.fLeaveDone = true;
    +                pVCpu->hmr0.s.fLeaveDone = true;
                 }

    @@ -9368 +9368 @@
                 AssertRC(rc);
                 Log4Func(("Resumed: Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
    -            pVCpu->hm.s.fLeaveDone = false;
    +            pVCpu->hmr0.s.fLeaveDone = false;

                 /* Do the EMT scheduled L1D flush if needed. */

    @@ -11978 +11978 @@
         pDbgState->fCpe1Extra       &= pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;
         pDbgState->fCpe1Unwanted    &= ~pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0;
    -    if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
    -    {
    -        pVCpu->hm.s.fDebugWantRdTscExit ^= true;
    +    if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
    +    {
    +        pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
             pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
         }

    @@ -12558 +12558 @@
         bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
         pVCpu->hm.s.fSingleInstruction     = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
    -    pVCpu->hm.s.fDebugWantRdTscExit    = false;
    -    pVCpu->hm.s.fUsingDebugLoop        = true;
    +    pVCpu->hmr0.s.fDebugWantRdTscExit    = false;
    +    pVCpu->hmr0.s.fUsingDebugLoop        = true;

         /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps.  */

    @@ -12668 +12668 @@
          * Clear the X86_EFL_TF if necessary.
          */
    -    if (pVCpu->hm.s.fClearTrapFlag)
    +    if (pVCpu->hmr0.s.fClearTrapFlag)
         {
             int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
             AssertRC(rc);
    -        pVCpu->hm.s.fClearTrapFlag = false;
    +        pVCpu->hmr0.s.fClearTrapFlag = false;
             pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
         }

    @@ -12680 +12680 @@

         /* Restore HMCPU indicators. */
    -    pVCpu->hm.s.fUsingDebugLoop     = false;
    -    pVCpu->hm.s.fDebugWantRdTscExit = false;
    +    pVCpu->hmr0.s.fUsingDebugLoop     = false;
    +    pVCpu->hmr0.s.fDebugWantRdTscExit = false;
         pVCpu->hm.s.fSingleInstruction  = fSavedSingleInstruction;

    @@ -13894 +13894 @@
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
                 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
    -                      pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
    +                      pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
                 break;

    @@ -13933 +13933 @@
         {
     #if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
    -        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hm.s.fUsingDebugLoop);
    +        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
     #endif
             pVCpu->hm.s.Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */

    @@ -14279 +14279 @@
         {
     #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    -        Assert(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
    +        Assert(pVCpu->hmr0.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
     #endif
             /*

    @@ -14355 +14355 @@
     #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
         PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    -    AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
    +    AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
                   ("uVector=%#x u32XcptBitmap=%#X32\n",
                    VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));

    @@ -14853 +14853 @@
     {
         HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    -    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
    +    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);

         PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;

    @@ -15007 +15007 @@
         PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
         bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
    -    if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
    -    {
    -        pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    +    if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
    +    {
    +        pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
             hmR0VmxUpdateStartVmFunction(pVCpu);
         }

    @@ -15492 +15492 @@
                        || !pVM->hm.s.fNestedPaging
                        || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
    -                   || pVCpu->hm.s.fUsingDebugLoop);
    +                   || pVCpu->hmr0.s.fUsingDebugLoop);

                 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */

    @@ -15544 +15544 @@
                        || !pVM->hm.s.fNestedPaging
                        || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
    -                   || pVCpu->hm.s.fUsingDebugLoop);
    +                   || pVCpu->hmr0.s.fUsingDebugLoop);

                 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
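
Several of the HMVMXR0.cpp hunks (and their HMSVMR0.cpp counterparts) repeat the same XCR0 decision: save/restore XCR0 around guest execution only when CR4.OSXSAVE is set and the guest XCR0 differs from the host value, and re-select the start-VM worker whenever that cached decision flips. A hedged, self-contained sketch of that logic with simplified stand-in types:

    /* Hedged sketch of the XCR0 save/restore decision repeated in the hunks
     * above; the context structure is a simplified stand-in. */
    #include <stdbool.h>
    #include <stdint.h>

    #define X86_CR4_OSXSAVE  (UINT64_C(1) << 18)

    typedef struct XCR0DECISIONCTX
    {
        uint64_t cr4;
        uint64_t uGuestXcr0;
        uint64_t uHostXcr0;            /* what ASMGetXcr0() returns on the host     */
        bool     fLoadSaveGuestXcr0;   /* cached decision, now in pVCpu->hmr0.s     */
    } XCR0DECISIONCTX;

    /* Returns true when the cached decision changed and the start-VM/VMRUN worker
     * must be re-selected (hmR0VmxUpdateStartVmFunction / hmR0SvmUpdateVmRunFunction
     * in the real code). */
    static bool xcr0UpdateLoadSaveDecision(XCR0DECISIONCTX *pCtx)
    {
        bool const fLoadSave = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->uGuestXcr0 != pCtx->uHostXcr0;
        if (fLoadSave != pCtx->fLoadSaveGuestXcr0)
        {
            pCtx->fLoadSaveGuestXcr0 = fLoadSave;
            return true;
        }
        return false;
    }

    int main(void)
    {
        XCR0DECISIONCTX Ctx = { X86_CR4_OSXSAVE, /*guest*/ 0x7, /*host*/ 0x1, false };
        return xcr0UpdateLoadSaveDecision(&Ctx) ? 0 : 1;   /* decision flips -> re-select worker */
    }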
  • trunk/src/VBox/VMM/include/HMInternal.h

    r87480 → r87487

    @@ -967 +967 @@
         /** Set when the TLB has been checked until we return from the world switch. */
         bool volatile               fCheckedTLBFlush;
    -    /** Set when we're using VT-x or AMD-V at that moment. */
    +    /** Set when we're using VT-x or AMD-V at that moment.
    +     * @todo r=bird: Misleading description.  For AMD-V this will be set the first
    +     *       time HMCanExecuteGuest() is called and only cleared again by
    +     *       HMR3ResetCpu().  For VT-x it will be set by HMCanExecuteGuest when we
    +     *       can execute something in VT-x mode, and cleared if we cannot.
    +     *
    +     *       The field is much more about recording the last HMCanExecuteGuest
    +     *       return value than anything about any "moment". */
         bool                        fActive;
    -    /** Whether we've completed the inner HM leave function. */
    -    bool                        fLeaveDone;
    -    /** Whether we're using the hyper DR7 or guest DR7. */
    -    bool                        fUsingHyperDR7;
    -
    -    /** Set if we need to flush the TLB during the world switch. */
    -    bool                        fForceTLBFlush;
    +
         /** Whether we should use the debug loop because of single stepping or special
          *  debug breakpoints / events are armed. */
         bool                        fUseDebugLoop;
    -    /** Whether we are currently executing in the debug loop.
    -     *  Mainly for assertions. */
    -    bool                        fUsingDebugLoop;
    -    /** Set if we using the debug loop and wish to intercept RDTSC. */
    -    bool                        fDebugWantRdTscExit;
    -
    -    /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
    -     *  execution. */
    -    bool                        fLoadSaveGuestXcr0;
    +
         /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
         bool                        fGIMTrapXcptUD;

    @@ -995 +988 @@
         bool                        fSingleInstruction;

    -    /** Set if we need to clear the trap flag because of single stepping. */
    -    bool                        fClearTrapFlag;
    -    bool                        afAlignment0[3];
    +    bool                        afAlignment0[2];

         /** An additional error code used for some gurus. */

    @@ -1096 +1087 @@
         HMEVENT                 Event;

    -    /** Current shadow paging mode for updating CR4. */
    +    /** Current shadow paging mode for updating CR4.
    +     * @todo move later (@bugref{9217}).  */
         PGMMODE                 enmShadowMode;
         uint32_t                u32TemporaryPadding;

    @@ -1257 +1249 @@
     typedef HMCPU *PHMCPU;
     AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush,  4);
    -AssertCompileMemberAlignment(HMCPU, fForceTLBFlush,    4);
     AssertCompileMemberAlignment(HMCPU, fCtxChanged,       8);
     AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx, 8);

    @@ -1283 +1274 @@
         uint32_t                    uCurrentAsid;

    -    uint32_t                    u32Padding0;
    +    /** Set if we need to flush the TLB during the world switch. */
    +    bool                        fForceTLBFlush;
    +    /** Whether we've completed the inner HM leave function. */
    +    bool                        fLeaveDone;
    +    /** Whether we're using the hyper DR7 or guest DR7. */
    +    bool                        fUsingHyperDR7;
    +    /** Whether we are currently executing in the debug loop.
    +     *  Mainly for assertions. */
    +    bool                        fUsingDebugLoop;
    +    /** Set if we using the debug loop and wish to intercept RDTSC. */
    +    bool                        fDebugWantRdTscExit;
    +    /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
    +     *  execution. */
    +    bool                        fLoadSaveGuestXcr0;
    +    /** Set if we need to clear the trap flag because of single stepping. */
    +    bool                        fClearTrapFlag;
    +
    +    bool                        afPadding1[5];

         union HM_NAMELESS_UNION_TAG(HMR0CPUUNION) /* no tag! */

    @@ -1344 +1352 @@
     typedef HMR0PERVCPU *PHMR0PERVCPU;
     AssertCompileMemberAlignment(HMR0PERVCPU, cWorldSwitchExits, 4);
    +AssertCompileMemberAlignment(HMR0PERVCPU, fForceTLBFlush,    4);
     AssertCompileMemberAlignment(HMR0PERVCPU, HM_UNION_NM(u.) vmx.RestoreHost,    8);
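
The header change relocates the whole flag block into HMR0PERVCPU and adds a compile-time alignment check for it. A hedged sketch of that check using C11 _Static_assert and a simplified stand-in layout (field order here is illustrative, not the real structure):

    /* Hedged sketch of the new AssertCompileMemberAlignment(HMR0PERVCPU,
     * fForceTLBFlush, 4) check, using a simplified stand-in layout. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct HMR0PERVCPUSKETCH
    {
        uint32_t cWorldSwitchExits;
        uint32_t cTlbFlushes;
        uint32_t idLastCpu;
        uint32_t uCurrentAsid;
        /* The boolean block relocated from HMCPU by this changeset. */
        bool     fForceTLBFlush;
        bool     fLeaveDone;
        bool     fUsingHyperDR7;
        bool     fUsingDebugLoop;
        bool     fDebugWantRdTscExit;
        bool     fLoadSaveGuestXcr0;
        bool     fClearTrapFlag;
        bool     afPadding1[5];       /* pad the 7-flag block out to 12 bytes */
    } HMR0PERVCPUSKETCH;

    /* Rough equivalent of AssertCompileMemberAlignment(HMR0PERVCPU, fForceTLBFlush, 4):
     * the assembly code relies on a stable, nicely aligned offset for the flag block. */
    _Static_assert(offsetof(HMR0PERVCPUSKETCH, fForceTLBFlush) % 4 == 0,
                   "fForceTLBFlush must start on a 4-byte boundary");

    int main(void) { return 0; }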
  • trunk/src/VBox/VMM/include/HMInternal.mac

    r87480 → r87487

    @@ -161 +161 @@
         .fCheckedTLBFlush               resb    1
         .fActive                        resb    1
    -    .fLeaveDone                     resb    1
    -    .fUsingHyperDR7                 resb    1
    -    .fForceTLBFlush                 resb    1
         .fUseDebugLoop                  resb    1
    -    .fUsingDebugLoop                resb    1
    -    .fDebugWantRdTscExit            resb    1
    -
    -    .fLoadSaveGuestXcr0             resb    1
    +
         .fGIMTrapXcptUD                 resb    1
         .fTrapXcptGpForLovelyMesaDrv    resb    1
         .fSingleInstruction             resb    1
    -    .fClearTrapFlag                 resb    1
         alignb 8

    @@ -232 +225 @@
         .uCurrentAsid                   resd    1

    +    .fForceTLBFlush                 resb    1
    +    .fLeaveDone                     resb    1
    +    .fUsingHyperDR7                 resb    1
    +    .fUsingDebugLoop                resb    1
    +    .fDebugWantRdTscExit            resb    1
    +    .fLoadSaveGuestXcr0             resb    1
    +    .fClearTrapFlag                 resb    1
    +
         alignb 8
     ;%if HMR0CPUVMX_size > HMR0CPUSVM_size
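
HMInternal.mac mirrors the C structures for the assembly code, which addresses these flags purely by byte offset, so the resb list above must track the header exactly. A hedged sketch of the kind of compile-time cross-check that catches drift; the layout and offsets below belong to a simplified stand-in, not the real structure:

    /* Minimal sketch, assuming a simplified stand-in layout: the assembly mirror
     * encodes field positions as byte offsets (the resd/resb lines above), so the
     * C side can pin them down and catch drift at compile time. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct HMR0PERVCPUMIRRORSKETCH
    {
        uint32_t uCurrentAsid;        /* .uCurrentAsid        resd 1 -> offset 0 here  */
        bool     fForceTLBFlush;      /* .fForceTLBFlush      resb 1 -> offset 4 here  */
        bool     fLeaveDone;          /* .fLeaveDone          resb 1 -> offset 5 here  */
        bool     fUsingHyperDR7;      /* .fUsingHyperDR7      resb 1 -> offset 6 here  */
        bool     fUsingDebugLoop;     /* .fUsingDebugLoop     resb 1 -> offset 7 here  */
        bool     fDebugWantRdTscExit; /* .fDebugWantRdTscExit resb 1 -> offset 8 here  */
        bool     fLoadSaveGuestXcr0;  /* .fLoadSaveGuestXcr0  resb 1 -> offset 9 here  */
        bool     fClearTrapFlag;      /* .fClearTrapFlag      resb 1 -> offset 10 here */
    } HMR0PERVCPUMIRRORSKETCH;

    /* If someone reorders the C fields without updating the assembly mirror,
     * checks like these fail instead of silently corrupting state from assembly. */
    _Static_assert(offsetof(HMR0PERVCPUMIRRORSKETCH, fForceTLBFlush)     == 4, "mirror out of sync");
    _Static_assert(offsetof(HMR0PERVCPUMIRRORSKETCH, fLoadSaveGuestXcr0) == 9, "mirror out of sync");

    int main(void) { return 0; }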