Changeset 87487 in vbox
- Timestamp: Jan 29, 2021 6:06:39 PM
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/HMAll.cpp (r87480 → r87487)

@@ -485 +485 @@
 }
 
+
 /**
  * Poke an EMT so it can perform the appropriate TLB shootdowns.
trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87469 → r87487)

@@ -786 +786 @@
 ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
 ;
-        cmp     byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], %2
+        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
         mov     eax, VERR_VMX_STARTVM_PRECOND_0
         jne     NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1).precond_failure_return)

@@ -1253 +1253 @@
 ; Verify template preconditions / parameters to ensure HMSVM.cpp didn't miss some state change.
 ;
-        cmp     byte [rsi + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], %2
+        cmp     byte [rsi + GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0], %2
         mov     eax, VERR_SVM_VMRUN_PRECOND_0
         jne     .failure_return
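Both hunks retarget the same precondition check: the world-switch routines are generated from an assembly template once per flag combination, and each generated variant re-verifies on entry that the live per-VCPU flag still matches the value it was instantiated for, since the field now lives in the ring-0-only GVMCPU.hmr0 area rather than the shared VMCPU.hm area. Below is a minimal C sketch of that precondition pattern; every name in it is a hypothetical stand-in, not a VirtualBox API.

    #include <stdbool.h>

    /* Demo types only; the real check reads GVMCPU.hmr0 + HMR0PERVCPU.fLoadSaveGuestXcr0
       and fails with VERR_VMX_STARTVM_PRECOND_0 / VERR_SVM_VMRUN_PRECOND_0. */
    typedef struct DemoVCpuR0
    {
        bool fLoadSaveGuestXcr0; /* ring-0 private flag after this changeset */
    } DemoVCpuR0;

    #define DEMO_VERR_PRECOND (-1)

    /* Variant generated for fLoadSaveGuestXcr0 == true: refuse to run if the
       dispatcher handed us a VCPU whose flag no longer matches. */
    static int demoStartVmWithXcr0(DemoVCpuR0 *pVCpu)
    {
        if (!pVCpu->fLoadSaveGuestXcr0)
            return DEMO_VERR_PRECOND;
        /* ... save host XCR0, load guest XCR0, run the guest, restore ... */
        return 0;
    }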
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r87480 → r87487)

@@ -731 +731 @@
         { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit },
     };
-    uintptr_t const idx = (pVCpu->hm.s.fLoadSaveGuestXcr0 ? 1 : 0)
+    uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0 ? 1 : 0)
                         | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_ENTRY ? 2 : 0)
                         | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_EXIT  ? 4 : 0);

@@ -1291 +1291 @@
     {
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
-        pVCpu->hm.s.fForceTLBFlush = true;
+        pVCpu->hmr0.s.fForceTLBFlush = true;
         fNewAsid = true;
     }

@@ -1301 +1301 @@
     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
     {
-        pVCpu->hm.s.fForceTLBFlush = true;
+        pVCpu->hmr0.s.fForceTLBFlush = true;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
     }

@@ -1324 +1324 @@
     {
         pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
-        if (pVCpu->hm.s.fForceTLBFlush)
+        if (pVCpu->hmr0.s.fForceTLBFlush)
         {
             /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */

@@ -1360 +1360 @@
         }
 
-        pVCpu->hm.s.fForceTLBFlush = false;
+        pVCpu->hmr0.s.fForceTLBFlush = false;
     }
 }

@@ -1669 +1669 @@
     /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
     bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
-    if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
-    {
-        pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
+    if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
+    {
+        pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
         hmR0SvmUpdateVmRunFunction(pVCpu);
     }

@@ -1918 +1918 @@
     if (fStepping)
     {
-        pVCpu->hm.s.fClearTrapFlag = true;
+        pVCpu->hmr0.s.fClearTrapFlag = true;
         pVmcb->guest.u64RFlags |= X86_EFL_TF;
         fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */

@@ -1952 +1952 @@
      * with the same values. */
         fInterceptMovDRx = true;
-        pVCpu->hm.s.fUsingHyperDR7 = true;
+        pVCpu->hmr0.s.fUsingHyperDR7 = true;
         Log5(("hmR0SvmExportSharedDebugState: Loaded hyper DRx\n"));
     }

@@ -1967 +1967 @@
         pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
     }
-    pVCpu->hm.s.fUsingHyperDR7 = false;
+    pVCpu->hmr0.s.fUsingHyperDR7 = false;
 
     /*

@@ -2280 +2280 @@
           == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
 
-    pVCpu->hm.s.fLeaveDone = false;
+    pVCpu->hmr0.s.fLeaveDone = false;
     return VINF_SUCCESS;
 }

@@ -2308 +2308 @@
     VMMRZCallRing3Disable(pVCpu);
 
-    if (!pVCpu->hm.s.fLeaveDone)
+    if (!pVCpu->hmr0.s.fLeaveDone)
     {
         hmR0SvmLeave(pVCpu, false /* fImportState */);
-        pVCpu->hm.s.fLeaveDone = true;
+        pVCpu->hmr0.s.fLeaveDone = true;
     }

@@ -2342 +2342 @@
           == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
 
-    pVCpu->hm.s.fLeaveDone = false;
+    pVCpu->hmr0.s.fLeaveDone = false;
 
     /* Restore longjmp state. */

@@ -2818 +2818 @@
     if (fWhat & CPUMCTX_EXTRN_DR6)
     {
-        if (!pVCpu->hm.s.fUsingHyperDR7)
+        if (!pVCpu->hmr0.s.fUsingHyperDR7)
             pCtx->dr[6] = pVmcbGuest->u64DR6;
         else

@@ -2826 +2826 @@
     if (fWhat & CPUMCTX_EXTRN_DR7)
     {
-        if (!pVCpu->hm.s.fUsingHyperDR7)
+        if (!pVCpu->hmr0.s.fUsingHyperDR7)
             pCtx->dr[7] = pVmcbGuest->u64DR7;
         else

@@ -2994 +2994 @@
     /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
        and done this from the SVMR0ThreadCtxCallback(). */
-    if (!pVCpu->hm.s.fLeaveDone)
+    if (!pVCpu->hmr0.s.fLeaveDone)
     {
         hmR0SvmLeave(pVCpu, true /* fImportState */);
-        pVCpu->hm.s.fLeaveDone = true;
+        pVCpu->hmr0.s.fLeaveDone = true;
     }

@@ -4620 +4620 @@
      * Clear the X86_EFL_TF if necessary.
      */
-    if (pVCpu->hm.s.fClearTrapFlag)
-    {
-        pVCpu->hm.s.fClearTrapFlag = false;
+    if (pVCpu->hmr0.s.fClearTrapFlag)
+    {
+        pVCpu->hmr0.s.fClearTrapFlag = false;
         pCtx->eflags.Bits.u1TF = 0;
     }

@@ -6553 +6553 @@
     bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], fLoadSaveGuestXcr0, pCtx->cr4));
-    if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
-    {
-        pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
+    if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
+    {
+        pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
         hmR0SvmUpdateVmRunFunction(pVCpu);
     }
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r87480 → r87487)

@@ -2988 +2988 @@
     pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
     pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
-    pVCpu->hm.s.fForceTLBFlush = false;
+    pVCpu->hmr0.s.fForceTLBFlush = false;
     return;
 }

@@ -3089 +3089 @@
 
 
-    pVCpu->hm.s.fForceTLBFlush = false;
+    pVCpu->hmr0.s.fForceTLBFlush = false;
     HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
 

@@ -3134 +3134 @@
         || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
     {
-        pVCpu->hm.s.fForceTLBFlush = true;
+        pVCpu->hmr0.s.fForceTLBFlush = true;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
     }

@@ -3141 +3141 @@
     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
     {
-        pVCpu->hm.s.fForceTLBFlush = true;
+        pVCpu->hmr0.s.fForceTLBFlush = true;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
     }

@@ -3148 +3148 @@
     if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
     {
-        pVCpu->hm.s.fForceTLBFlush = true;
+        pVCpu->hmr0.s.fForceTLBFlush = true;
         pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);

@@ -3156 +3156 @@
     pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
 
-    if (pVCpu->hm.s.fForceTLBFlush)
+    if (pVCpu->hmr0.s.fForceTLBFlush)
     {
         hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);
-        pVCpu->hm.s.fForceTLBFlush = false;
+        pVCpu->hmr0.s.fForceTLBFlush = false;
     }
 }

@@ -3189 +3189 @@
         || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
     {
-        pVCpu->hm.s.fForceTLBFlush = true;
+        pVCpu->hmr0.s.fForceTLBFlush = true;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
     }

@@ -3202 +3202 @@
      * include fExplicitFlush's too) - an obscure corner case.
      */
-    pVCpu->hm.s.fForceTLBFlush = true;
+    pVCpu->hmr0.s.fForceTLBFlush = true;
     STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
 }

@@ -3209 +3209 @@
     if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
     {
-        pVCpu->hm.s.fForceTLBFlush = true;
+        pVCpu->hmr0.s.fForceTLBFlush = true;
         pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);

@@ -3216 +3216 @@
     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
-    if (pVCpu->hm.s.fForceTLBFlush)
+    if (pVCpu->hmr0.s.fForceTLBFlush)
     {
         ++pHostCpu->uCurrentAsid;

@@ -3226 +3226 @@
     }
 
-    pVCpu->hm.s.fForceTLBFlush = false;
+    pVCpu->hmr0.s.fForceTLBFlush = false;
     pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
     pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;

@@ -4206 +4206 @@
         { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
     };
-    uintptr_t const idx = (pVCpu->hm.s.fLoadSaveGuestXcr0 ? 1 : 0)
+    uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0 ? 1 : 0)
                         | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_ENTRY ? 2 : 0)
                         | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_L1D_ENTRY  ? 4 : 0)

@@ -6025 +6025 @@
     /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
     bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
-    if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
-    {
-        pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
+    if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
+    {
+        pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
         hmR0VmxUpdateStartVmFunction(pVCpu);
     }

@@ -6096 +6096 @@
         pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
         pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
-        pVCpu->hm.s.fClearTrapFlag = true;
+        pVCpu->hmr0.s.fClearTrapFlag = true;
         fSteppingDB = true;
     }

@@ -6121 +6121 @@
         /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
         u64GuestDr7 = CPUMGetHyperDR7(pVCpu);
-        pVCpu->hm.s.fUsingHyperDR7 = true;
+        pVCpu->hmr0.s.fUsingHyperDR7 = true;
         fInterceptMovDRx = true;
     }

@@ -6155 +6155 @@
             /* Update DR7 with the actual guest value. */
             u64GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
-            pVCpu->hm.s.fUsingHyperDR7 = false;
+            pVCpu->hmr0.s.fUsingHyperDR7 = false;
         }
 

@@ -7138 +7138 @@
 
     if (   fOffsettedTsc
-        && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
+        && RT_LIKELY(!pVCpu->hmr0.s.fDebugWantRdTscExit))
     {
         if (pVmxTransient->fIsNestedGuest)

@@ -7722 +7722 @@
     if (fWhat & CPUMCTX_EXTRN_DR7)
     {
-        if (!pVCpu->hm.s.fUsingHyperDR7)
+        if (!pVCpu->hmr0.s.fUsingHyperDR7)
             rc = VMXReadVmcsNw(VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);   AssertRC(rc);
     }

@@ -8432 +8432 @@
     /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
        and done this from the VMXR0ThreadCtxCallback(). */
-    if (!pVCpu->hm.s.fLeaveDone)
+    if (!pVCpu->hmr0.s.fLeaveDone)
     {
         int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */);
         AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
-        pVCpu->hm.s.fLeaveDone = true;
+        pVCpu->hmr0.s.fLeaveDone = true;
     }
     Assert(!pVCpu->cpum.GstCtx.fExtrn);

@@ -8584 +8584 @@
     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
 
-    Assert(!pVCpu->hm.s.fClearTrapFlag);
+    Assert(!pVCpu->hmr0.s.fClearTrapFlag);
 
     /* Update the exit-to-ring 3 reason. */

@@ -9285 +9285 @@
     {
         pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = fInNestedGuestMode;
-        pVCpu->hm.s.fLeaveDone = false;
+        pVCpu->hmr0.s.fLeaveDone = false;
         Log4Func(("Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
 

@@ -9326 +9326 @@
 
         /* Restore host-state (FPU, debug etc.) */
-        if (!pVCpu->hm.s.fLeaveDone)
+        if (!pVCpu->hmr0.s.fLeaveDone)
         {
             /*

@@ -9333 +9333 @@
             */
            hmR0VmxLeave(pVCpu, false /* fImportState */);
-            pVCpu->hm.s.fLeaveDone = true;
+            pVCpu->hmr0.s.fLeaveDone = true;
         }
 

@@ -9368 +9368 @@
     AssertRC(rc);
     Log4Func(("Resumed: Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
-    pVCpu->hm.s.fLeaveDone = false;
+    pVCpu->hmr0.s.fLeaveDone = false;
 
     /* Do the EMT scheduled L1D flush if needed. */

@@ -11978 +11978 @@
     pDbgState->fCpe1Extra &= pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;
     pDbgState->fCpe1Unwanted &= ~pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0;
-    if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
-    {
-        pVCpu->hm.s.fDebugWantRdTscExit ^= true;
+    if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
+    {
+        pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
         pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
     }

@@ -12558 +12558 @@
     bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
     pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
-    pVCpu->hm.s.fDebugWantRdTscExit = false;
-    pVCpu->hm.s.fUsingDebugLoop = true;
+    pVCpu->hmr0.s.fDebugWantRdTscExit = false;
+    pVCpu->hmr0.s.fUsingDebugLoop = true;
 
     /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */

@@ -12668 +12668 @@
      * Clear the X86_EFL_TF if necessary.
      */
-    if (pVCpu->hm.s.fClearTrapFlag)
+    if (pVCpu->hmr0.s.fClearTrapFlag)
     {
         int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
         AssertRC(rc);
-        pVCpu->hm.s.fClearTrapFlag = false;
+        pVCpu->hmr0.s.fClearTrapFlag = false;
         pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
     }

@@ -12680 +12680 @@
 
     /* Restore HMCPU indicators. */
-    pVCpu->hm.s.fUsingDebugLoop = false;
-    pVCpu->hm.s.fDebugWantRdTscExit = false;
+    pVCpu->hmr0.s.fUsingDebugLoop = false;
+    pVCpu->hmr0.s.fDebugWantRdTscExit = false;
     pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
 

@@ -13894 +13894 @@
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
             Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
-                      pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
+                      pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
             break;

@@ -13933 +13933 @@
     {
 #if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
-        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hm.s.fUsingDebugLoop);
+        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
 #endif
         pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */

@@ -14279 +14279 @@
 {
 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
-    Assert(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
+    Assert(pVCpu->hmr0.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
 #endif
     /*

@@ -14355 +14355 @@
 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
-    AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
+    AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
               ("uVector=%#x u32XcptBitmap=%#X32\n",
                VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));

@@ -14853 +14853 @@
 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
-    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
+    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
 
     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;

@@ -15007 +15007 @@
     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
-    if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0)
-    {
-        pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
+    if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
+    {
+        pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
         hmR0VmxUpdateStartVmFunction(pVCpu);
     }

@@ -15492 +15492 @@
            || !pVM->hm.s.fNestedPaging
            || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
-           || pVCpu->hm.s.fUsingDebugLoop);
+           || pVCpu->hmr0.s.fUsingDebugLoop);
 
     /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */

@@ -15544 +15544 @@
            || !pVM->hm.s.fNestedPaging
            || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
-           || pVCpu->hm.s.fUsingDebugLoop);
+           || pVCpu->hmr0.s.fLeaveDone);
 
     /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
trunk/src/VBox/VMM/include/HMInternal.h (r87480 → r87487)

@@ -967 +967 @@ (struct HMCPU)
     /** Set when the TLB has been checked until we return from the world switch. */
     bool volatile               fCheckedTLBFlush;
-    /** Set when we're using VT-x or AMD-V at that moment. */
+    /** Set when we're using VT-x or AMD-V at that moment.
+     * @todo r=bird: Misleading description. For AMD-V this will be set the first
+     *       time HMCanExecuteGuest() is called and only cleared again by
+     *       HMR3ResetCpu(). For VT-x it will be set by HMCanExecuteGuest when we
+     *       can execute something in VT-x mode, and cleared if we cannot.
+     *
+     *       The field is much more about recording the last HMCanExecuteGuest
+     *       return value than anything about any "moment". */
     bool                        fActive;
-    /** Whether we've completed the inner HM leave function. */
-    bool                        fLeaveDone;
-    /** Whether we're using the hyper DR7 or guest DR7. */
-    bool                        fUsingHyperDR7;
-
-    /** Set if we need to flush the TLB during the world switch. */
-    bool                        fForceTLBFlush;
-
     /** Whether we should use the debug loop because of single stepping or special
      * debug breakpoints / events are armed. */
     bool                        fUseDebugLoop;
-    /** Whether we are currently executing in the debug loop.
-     * Mainly for assertions. */
-    bool                        fUsingDebugLoop;
-    /** Set if we using the debug loop and wish to intercept RDTSC. */
-    bool                        fDebugWantRdTscExit;
-
-    /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
-     * execution. */
-    bool                        fLoadSaveGuestXcr0;
+
     /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
     bool                        fGIMTrapXcptUD;

@@ -995 +988 @@
     bool                        fSingleInstruction;
 
-    /** Set if we need to clear the trap flag because of single stepping. */
-    bool                        fClearTrapFlag;
-    bool                        afAlignment0[3];
+    bool                        afAlignment0[2];
 
     /** An additional error code used for some gurus. */

@@ -1096 +1087 @@
     HMEVENT                     Event;
 
-    /** Current shadow paging mode for updating CR4. */
+    /** Current shadow paging mode for updating CR4.
+     * @todo move later (@bugref{9217}). */
     PGMMODE                     enmShadowMode;
     uint32_t                    u32TemporaryPadding;

@@ -1257 +1249 @@
 typedef HMCPU *PHMCPU;
 AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush, 4);
-AssertCompileMemberAlignment(HMCPU, fForceTLBFlush, 4);
 AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);
 AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx, 8);

@@ -1283 +1274 @@ (struct HMR0PERVCPU)
     uint32_t                    uCurrentAsid;
 
-    uint32_t                    u32Padding0;
+    /** Set if we need to flush the TLB during the world switch. */
+    bool                        fForceTLBFlush;
+    /** Whether we've completed the inner HM leave function. */
+    bool                        fLeaveDone;
+    /** Whether we're using the hyper DR7 or guest DR7. */
+    bool                        fUsingHyperDR7;
+    /** Whether we are currently executing in the debug loop.
+     * Mainly for assertions. */
+    bool                        fUsingDebugLoop;
+    /** Set if we using the debug loop and wish to intercept RDTSC. */
+    bool                        fDebugWantRdTscExit;
+    /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
+     * execution. */
+    bool                        fLoadSaveGuestXcr0;
+    /** Set if we need to clear the trap flag because of single stepping. */
+    bool                        fClearTrapFlag;
+
+    bool                        afPadding1[5];
 
     union HM_NAMELESS_UNION_TAG(HMR0CPUUNION) /* no tag! */

@@ -1344 +1352 @@
 typedef HMR0PERVCPU *PHMR0PERVCPU;
 AssertCompileMemberAlignment(HMR0PERVCPU, cWorldSwitchExits, 4);
+AssertCompileMemberAlignment(HMR0PERVCPU, fForceTLBFlush, 4);
 AssertCompileMemberAlignment(HMR0PERVCPU, HM_UNION_NM(u.) vmx.RestoreHost, 8);
trunk/src/VBox/VMM/include/HMInternal.mac (r87480 → r87487)

@@ -161 +161 @@ (HMCPU)
     .fCheckedTLBFlush            resb 1
     .fActive                     resb 1
-    .fLeaveDone                  resb 1
-    .fUsingHyperDR7              resb 1
-    .fForceTLBFlush              resb 1
-
     .fUseDebugLoop               resb 1
-    .fUsingDebugLoop             resb 1
-    .fDebugWantRdTscExit         resb 1
-
-    .fLoadSaveGuestXcr0          resb 1
     .fGIMTrapXcptUD              resb 1
     .fTrapXcptGpForLovelyMesaDrv resb 1
     .fSingleInstruction          resb 1
-    .fClearTrapFlag              resb 1
     alignb 8

@@ -232 +225 @@ (HMR0PERVCPU)
     .uCurrentAsid                resd 1
 
+    .fForceTLBFlush              resb 1
+    .fLeaveDone                  resb 1
+    .fUsingHyperDR7              resb 1
+    .fUsingDebugLoop             resb 1
+    .fDebugWantRdTscExit         resb 1
+    .fLoadSaveGuestXcr0          resb 1
+    .fClearTrapFlag              resb 1
+
     alignb 8
     ;%if HMR0CPUVMX_size > HMR0CPUSVM_size
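HMInternal.mac mirrors the C structures by hand for the assembler, so a field reshuffle like this one must land in both files together. As a hedged illustration of the failure mode it guards against, one could pin the assembler's assumed offsets in a compile-time check (demo names and values only; VirtualBox has its own machinery for keeping the .mac files honest):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Same shape as the start of the demo ring-0 per-VCPU struct above. */
    struct DemoR0PerVCpuMac
    {
        uint32_t uCurrentAsid;
        bool     fForceTLBFlush;
        bool     fLeaveDone;
    };

    /* Offsets an assembly include would encode with resd/resb reservations;
       if someone reorders the C struct without touching the .mac file, this
       fires at compile time instead of inside the world switcher at run time. */
    #define DEMO_MAC_OFF_FFORCETLBFLUSH 4
    #define DEMO_MAC_OFF_FLEAVEDONE     5

    static_assert(offsetof(struct DemoR0PerVCpuMac, fForceTLBFlush)
                  == DEMO_MAC_OFF_FFORCETLBFLUSH, "HMInternal.mac out of sync: fForceTLBFlush");
    static_assert(offsetof(struct DemoR0PerVCpuMac, fLeaveDone)
                  == DEMO_MAC_OFF_FLEAVEDONE, "HMInternal.mac out of sync: fLeaveDone");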