Changeset 72805 in vbox for trunk/src/VBox/VMM
Timestamp: Jul 3, 2018 4:05:43 AM (7 years ago)
svn:sync-xref-src-repo-rev: 123348
Location: trunk/src/VBox/VMM
Files: 8 edited
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
(r72772 → r72805)

 /** @name Ring-0 method table for AMD-V and VT-x specific operations.
  * @{ */
-DECLR0CALLBACKMEMBER(int, pfnEnterSession, (PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu));
+DECLR0CALLBACKMEMBER(int, pfnEnterSession, (PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu));
 DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit));
 DECLR0CALLBACKMEMBER(int, pfnExportHostState, (PVMCPU pVCpu));
-DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
+DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPU pVCpu, PCPUMCTX pCtx));
 DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
                                          bool fEnabledByHost, void *pvArg));
…
  * @{ */

-static DECLCALLBACK(int) hmR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
-{
-    RT_NOREF3(pVM, pVCpu, pCpu);
+static DECLCALLBACK(int) hmR0DummyEnter(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+{
+    RT_NOREF2(pVCpu, pCpu);
     return VINF_SUCCESS;
 }
…
 }

-static DECLCALLBACK(VBOXSTRICTRC) hmR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    RT_NOREF3(pVM, pVCpu, pCtx);
+static DECLCALLBACK(VBOXSTRICTRC) hmR0DummyRunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    RT_NOREF2(pVCpu, pCtx);
     return VINF_SUCCESS;
 }
…
  * @remarks No-long-jump zone!!!
  */
-VMMR0_INT_DECL(int) HMR0EnterCpu(PVMCPU pVCpu)
+VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPU pVCpu)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
  *
  * @returns VBox status code.
- * @param   pVM    The cross context VM structure.
  * @param   pVCpu  The cross context virtual CPU structure.
  *
  * @remarks This is called with preemption disabled.
  */
-VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
+VMMR0_INT_DECL(int) HMR0Enter(PVMCPU pVCpu)
 {
     /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
…
     /* Load the bare minimum state required for entering HM. */
-    int rc = HMR0EnterCpu(pVCpu);
+    int rc = hmR0EnterCpu(pVCpu);
     AssertRCReturn(rc, rc);
…
     }

-    rc = g_HmR0.pfnEnterSession(pVM, pVCpu, pCpu);
+    rc = g_HmR0.pfnEnterSession(pVCpu, pCpu);
     AssertMsgRCReturn(rc, ("rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
…
 VMMR0_INT_DECL(int) HMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
 {
+    RT_NOREF(pVM);
+
 #ifdef VBOX_STRICT
     /* With thread-context hooks we would be running this code with preemption enabled. */
…
 #endif

-    VBOXSTRICTRC rcStrict = g_HmR0.pfnRunGuestCode(pVM, pVCpu, &pVCpu->cpum.GstCtx);
+    VBOXSTRICTRC rcStrict = g_HmR0.pfnRunGuestCode(pVCpu, &pVCpu->cpum.GstCtx);

 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
…
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (pVM->hm.s.vmx.fSupported)
-        return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);
-    return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
+        return VMXR0InvalidatePage(pVCpu, GCVirt);
+    return SVMR0InvalidatePage(pVCpu, GCVirt);
 }
…
 #ifdef VBOX_WITH_RAW_MODE
-
 /**
  * Raw-mode switcher hook - disable VT-x if it's active *and* the current
…
     }
 }
-
 #endif /* VBOX_WITH_RAW_MODE */
+

 #ifdef VBOX_STRICT
-
 /**
  * Dumps a descriptor.
…
  * Formats a full register dump.
  *
- * @param   pVM    The cross context VM structure.
  * @param   pVCpu  The cross context virtual CPU structure.
  * @param   pCtx   Pointer to the CPU context.
  */
-VMMR0_INT_DECL(void) hmR0DumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    NOREF(pVM);
-
+VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
     /*
      * Format the flags.
…
     NOREF(pFpuCtx);
 }
-
 #endif /* VBOX_STRICT */
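The common thread in this file (and the rest of the changeset) is dropping the redundant PVM parameter: every EMT already carries its VM pointer inside the VMCPU structure, so callees fetch it with pVCpu->CTX_SUFF(pVM) instead of having it threaded through every call. A minimal standalone sketch of that pattern, with simplified stand-in types (fHmEnabled is a made-up field; the real structures and the real CTX_SUFF() macro, which selects the R0/R3/RC flavour of a member name, live in the VMM headers):

    #include <stdio.h>

    /* Simplified stand-in types; not the real VBox/vm.h definitions. */
    typedef struct VM    VM,    *PVM;
    typedef struct VMCPU VMCPU, *PVMCPU;
    struct VM    { int fHmEnabled; };            /* hypothetical field for the demo */
    struct VMCPU { PVM pVMR0; };                 /* per-VCPU back-pointer to the VM */

    /* Stand-in for CTX_SUFF(): in ring-0 code, CTX_SUFF(pVM) expands to pVMR0. */
    #define CTX_SUFF(a_Name) a_Name##R0

    /* Before: int HMR0Enter(PVM pVM, PVMCPU pVCpu);
     * After:  the VM pointer is derived from the VCPU instead of being passed. */
    static int HMR0Enter(PVMCPU pVCpu)
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);  /* one pointer chase replaces the extra argument */
        return pVM->fHmEnabled ? 0 : -1;
    }

    int main(void)
    {
        VM    Vm   = { 1 };
        VMCPU VCpu = { &Vm };
        printf("HMR0Enter -> %d\n", HMR0Enter(&VCpu));
        return 0;
    }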
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r72786 → r72805)

 # endif
 #endif /* !VBOX_WITH_STATISTICS */

-
 /** If we decide to use a function table approach this can be useful to
…
  *
  * @returns VBox status code.
- * @param   pCpu           Pointer to the CPU info struct.
+ * @param   pHostCpu       Pointer to the CPU info struct.
  * @param   pVM            The cross context VM structure. Can be
  *                         NULL after a resume!
…
  * @param   pvArg          Unused on AMD-V.
  */
-VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
                               void *pvArg)
 {
…
     if (   pVM
         && pVM->hm.s.svm.fIgnoreInUseError)
-        pCpu->fIgnoreAMDVInUseError = true;
-
-    if (!pCpu->fIgnoreAMDVInUseError)
+        pHostCpu->fIgnoreAMDVInUseError = true;
+
+    if (!pHostCpu->fIgnoreAMDVInUseError)
     {
         ASMSetFlags(fEFlags);
…
      * entirely with before executing any guest code.
      */
-    pCpu->fFlushAsidBeforeUse = true;
+    pHostCpu->fFlushAsidBeforeUse = true;

     /*
      * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
      */
-    ++pCpu->cTlbFlushes;
+    ++pHostCpu->cTlbFlushes;

     return VINF_SUCCESS;
…
  *
  * @returns VBox status code.
- * @param   pCpu           Pointer to the CPU info struct.
+ * @param   pHostCpu       Pointer to the CPU info struct.
  * @param   pvCpuPage      Pointer to the global CPU page.
  * @param   HCPhysCpuPage  Physical address of the global CPU page.
  */
-VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
                  && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
-    NOREF(pCpu);
+    RT_NOREF(pHostCpu);

     /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   GCVirt  Guest virtual address of the page to invalidate.
  */
-VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
-{
-    AssertReturn(pVM, VERR_INVALID_PARAMETER);
-    Assert(pVM->hm.s.svm.fSupported);
-
-    bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
+VMMR0DECL(int) SVMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
+{
+    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
+
+    bool const fFlushPending = pVCpu->CTX_SUFF(pVM)->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);

     /* Skip it if a TLB flush is already pending. */
…
     pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

-    Assert(RT_HI_U32(uShadowCr0) == 0);
+    Assert(!RT_HI_U32(uShadowCr0));
     if (pVmcb->guest.u64CR0 != uShadowCr0)
     {
…
     /* Update VMCB with the shadow CR4 the appropriate VMCB clean bits. */
-    Assert(RT_HI_U32(uShadowCr4) == 0);
+    Assert(!RT_HI_U32(uShadowCr4));
     pVmcb->guest.u64CR4 = uShadowCr4;
     pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
…
  *
  * @returns VBox status code.
- * @param   pVM       The cross context VM structure.
  * @param   pVCpu     The cross context virtual CPU structure.
- * @param   pCpu      Pointer to the CPU info struct.
- */
-VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
-{
-    AssertPtr(pVM);
+ * @param   pHostCpu  Pointer to the CPU info struct.
+ */
+VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
+{
     AssertPtr(pVCpu);
-    Assert(pVM->hm.s.svm.fSupported);
+    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    NOREF(pVM); NOREF(pCpu);
-
-    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
+    RT_NOREF(pHostCpu);
+
+    LogFlowFunc(("pVCpu=%p\n", pVCpu));
     Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
                                    == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
…
      * initializing AMD-V if necessary (onlined CPUs, local init etc.)
      */
-    int rc = HMR0EnterCpu(pVCpu);
+    int rc = hmR0EnterCpu(pVCpu);
     AssertRC(rc); NOREF(rc);
     Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
…
     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
-    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
-    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCtx    Pointer to the guest-CPU context.
…
  *          VINF_VMM_UNKNOWN_RING3_CALL.
  */
-static int hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
-{
-    Assert(pVM);
+static int hmR0SvmExitToRing3(PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
+{
     Assert(pVCpu);
     Assert(pCtx);
…
     /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
     VMMRZCallRing3Disable(pVCpu);
-    Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions, pVM->fGlobalForcedActions));
+    Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions,
+              pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions));

     /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
…
                              | CPUM_CHANGED_TR
                              | CPUM_CHANGED_HIDDEN_SEL_REGS);
-    if (   pVM->hm.s.fNestedPaging
+    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
         && CPUMIsGuestPagingEnabledEx(pCtx))
     {
…
  * intercepts.
  *
- * @param   pVM     The cross context VM structure.
- * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCtx    Pointer to the guest-CPU or nested-guest-CPU context.
  * @param   pVmcb   Pointer to the VM control block.
…
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
+static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
 {
     /*
…
     bool fParavirtTsc;
     uint64_t uTscOffset;
-    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
+    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu->CTX_SUFF(pVM), pVCpu, &uTscOffset, &fParavirtTsc);

     bool fIntercept;
…
 * Reports world-switch error and dumps some useful debug info.
 *
- * @param   pVM      The cross context VM structure.
  * @param   pVCpu    The cross context virtual CPU structure.
  * @param   rcVMRun  The return code from VMRUN (or
…
  * @param   pCtx     Pointer to the guest-CPU context.
  */
-static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
-{
-    NOREF(pCtx);
+static void hmR0SvmReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
+{
     HMSVM_ASSERT_PREEMPT_SAFE();
     HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);

-    PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
     {
-        hmR0DumpRegs(pVM, pVCpu, pCtx); NOREF(pVM);
-        /** @todo We probably don't need to dump this anymore or we can expand
-         *        hmR0DumpRegs()? */
 #ifdef VBOX_STRICT
+        hmR0DumpRegs(pVCpu, pCtx);
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         Log4(("ctrl.u32VmcbCleanBits   %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
         Log4(("ctrl.u16InterceptRdCRx  %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
…
     Log4Func(("rcVMRun=%d\n", rcVMRun));

-    NOREF(pVmcb);
+    NOREF(pCtx);
 }
…
  * to the EM loop.
  *
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCtx    Pointer to the guest-CPU context.
  */
-static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static int hmR0SvmCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
…
     APICUpdatePendingInterrupts(pVCpu);

+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (   VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
                             ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
…
  * @retval VINF_* scheduling changes, we have to go back to ring-3.
  *
- * @param   pVM            The cross context VM structure.
  * @param   pVCpu          The cross context virtual CPU structure.
  * @param   pCtx           Pointer to the nested-guest-CPU context.
…
  * @sa hmR0SvmPreRunGuest.
  */
-static int hmR0SvmPreRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static int hmR0SvmPreRunGuestNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     HMSVM_ASSERT_PREEMPT_SAFE();
…
     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
+    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
     if (rc != VINF_SUCCESS)
         return rc;
…
      * NB: If we could continue a task switch exit we wouldn't need to do this.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (RT_UNLIKELY(   !pVM->hm.s.svm.u32Features
                     && pVCpu->hm.s.Event.fPending
…
  * @retval VINF_* scheduling changes, we have to go back to ring-3.
  *
- * @param   pVM            The cross context VM structure.
  * @param   pVCpu          The cross context virtual CPU structure.
  * @param   pCtx           Pointer to the guest-CPU context.
  * @param   pSvmTransient  Pointer to the SVM transient structure.
  */
-static int hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static int hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     HMSVM_ASSERT_PREEMPT_SAFE();
…
     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
+    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
     if (rc != VINF_SUCCESS)
         return rc;
…
      * NB: If we could continue a task switch exit we wouldn't need to do this.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
         if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))
…
         || fMigratedHostCpu)
     {
-        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pCtx, pVmcb);
+        hmR0SvmUpdateTscOffsetting(pVCpu, pCtx, pVmcb);
         pSvmTransient->fUpdateTscOffsetting = false;
     }
…
 /**
- * Wrapper for running the guest code in AMD-V.
+ * Wrapper for running the guest (or nested-guest) code in AMD-V.
  *
  * @returns VBox strict status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   HCPhysVmcb  The host physical address of the VMCB.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0SvmRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, RTHCPHYS HCPhysVmcb)
 {
     /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
…
      * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 #ifdef VBOX_WITH_KERNEL_USING_XMM
-    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
-                             pVCpu->hm.s.svm.pfnVMRun);
+    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu, pVCpu->hm.s.svm.pfnVMRun);
 #else
-    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
-#endif
-}
-
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-/**
- * Wrapper for running the nested-guest code in AMD-V.
- *
- * @returns VBox strict status code.
- * @param   pVM     The cross context VM structure.
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0SvmRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
-    pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
-
-    /*
-     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
-     * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
-     * callee-saved and thus the need for this XMM wrapper.
-     *
-     * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
-     */
-#ifdef VBOX_WITH_KERNEL_USING_XMM
-    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
-                             pVCpu->hm.s.svm.pfnVMRun);
-#else
-    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
+    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu);
 #endif
 }
…
     return uTicks - pVmcbNstGstCache->u64TSCOffset;
 }
-#endif
+

…
-    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
+    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
     TMNotifyEndOfExecution(pVCpu);  /* Notify TM that the guest is no longer running. */
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
…
  *
  * @returns VBox status code.
- * @param   pVM      The cross context VM structure.
  * @param   pVCpu    The cross context virtual CPU structure.
  * @param   pCtx     Pointer to the guest-CPU context.
  * @param   pcLoops  Pointer to the number of executed loops.
  */
-static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
-{
-    uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
+static int hmR0SvmRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+{
+    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
     Assert(pcLoops);
     Assert(*pcLoops <= cMaxResumeLoops);
…
            ring-3. This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;
…
         hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
-        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
+        rc = hmR0SvmRunGuest(pVCpu, pCtx, pVCpu->hm.s.svm.HCPhysVmcb);

         /* Restore any residual host-state and save any bits shared between host and guest
…
             if (rc == VINF_SUCCESS)
                 rc = VERR_SVM_INVALID_GUEST_STATE;
-            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
-            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
+            hmR0SvmReportWorldSwitchError(pVCpu, rc, pCtx);
             break;
         }
…
         /* Handle the #VMEXIT. */
         HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
-        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
         rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (rc != VINF_SUCCESS)
             break;
…
  *
  * @returns VBox status code.
- * @param   pVM      The cross context VM structure.
  * @param   pVCpu    The cross context virtual CPU structure.
  * @param   pCtx     Pointer to the guest-CPU context.
  * @param   pcLoops  Pointer to the number of executed loops.
  */
-static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
-{
-    uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
+static int hmR0SvmRunGuestCodeStep(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+{
+    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
     Assert(pcLoops);
     Assert(*pcLoops <= cMaxResumeLoops);
…
            ring-3. This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;
…
         hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);

-        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
+        rc = hmR0SvmRunGuest(pVCpu, pCtx, pVCpu->hm.s.svm.HCPhysVmcb);

         /* Restore any residual host-state and save any bits shared between host and guest
…
             if (rc == VINF_SUCCESS)
                 rc = VERR_SVM_INVALID_GUEST_STATE;
-            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
-            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
+            hmR0SvmReportWorldSwitchError(pVCpu, rc, pCtx);
             return rc;
         }
…
         /* Handle the #VMEXIT. */
         HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
-        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
         rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (rc != VINF_SUCCESS)
             break;
…
  *
  * @returns VBox status code.
- * @param   pVM      The cross context VM structure.
  * @param   pVCpu    The cross context virtual CPU structure.
  * @param   pCtx     Pointer to the guest-CPU context.
…
  *                   execution loop pass the remainder value, else pass 0.
  */
-static int hmR0SvmRunGuestCodeNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+static int hmR0SvmRunGuestCodeNested(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
 {
     HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
     Assert(pcLoops);
-    Assert(*pcLoops <= pVM->hm.s.cMaxResumeLoops);
+    Assert(*pcLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops);

     SVMTRANSIENT SvmTransient;
…
            ring-3. This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuestNested(pVCpu, pCtx, &SvmTransient);
         if (   rc != VINF_SUCCESS
             || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
…
         hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);

-        rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);
+        rc = hmR0SvmRunGuest(pVCpu, pCtx, pCtx->hwvirt.svm.HCPhysVmcb);

         /* Restore any residual host-state and save any bits shared between host and guest
…
         /* Handle the #VMEXIT. */
         HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
-        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb));
         rc = hmR0SvmHandleExitNested(pVCpu, pCtx, &SvmTransient);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (   rc != VINF_SUCCESS
             || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
             break;
-        if (++(*pcLoops) >= pVM->hm.s.cMaxResumeLoops)
+        if (++(*pcLoops) >= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
         {
             STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
…
  *
  * @returns Strict VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCtx    Pointer to the guest-CPU context.
  */
-VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
…
     {
         if (!pVCpu->hm.s.fSingleInstruction)
-            rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx, &cLoops);
+            rc = hmR0SvmRunGuestCodeNormal(pVCpu, pCtx, &cLoops);
         else
-            rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx, &cLoops);
+            rc = hmR0SvmRunGuestCodeStep(pVCpu, pCtx, &cLoops);
     }
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
…
         if (rc == VINF_SVM_VMRUN)
         {
-            rc = hmR0SvmRunGuestCodeNested(pVM, pVCpu, pCtx, &cLoops);
+            rc = hmR0SvmRunGuestCodeNested(pVCpu, pCtx, &cLoops);
             if (rc == VINF_SVM_VMEXIT)
                 rc = VINF_SUCCESS;
…
     /* Prepare to return to ring-3. This will remove longjmp notifications. */
-    rc = hmR0SvmExitToRing3(pVM, pVCpu, pCtx, rc);
+    rc = hmR0SvmExitToRing3(pVCpu, pCtx, rc);
     Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
     return rc;
…
  * @retval VERR_* Fatal errors.
  *
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCtx    The guest CPU context.
…
  * @remarks Updates the RIP if the instruction was executed successfully.
  */
-static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static int hmR0SvmInterpretInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     /* Only allow 32 & 64 bit code. */
…
     {
         PDISSTATE pDis = &pVCpu->hm.s.DisState;
-        int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
+        int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, NULL /* pcbInstr */);
         if (   RT_SUCCESS(rc)
             && pDis->pCurInstr->uOpcode == OP_INVLPG)
…
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    Assert(!pVM->hm.s.fNestedPaging);
+    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);

     bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu, pCtx);
…
     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
-    int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, pCtx);    /* Updates RIP if successful. */
+    int rc = hmR0SvmInterpretInvlpg(pVCpu, pCtx);         /* Updates RIP if successful. */
     Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
     HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
…
     static uint32_t const s_aIOSize[8]  = { 0, 1, 2, 0, 4, 0, 0, 0 };                  /* Size of the I/O accesses in bytes. */
     static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
                                                                                           the result (in AL/AX/EAX). */
     Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
…
     if (   pVM->hm.s.fTprPatchingAllowed
-        && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
         && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
         && (   !(u32ErrCode & X86_TRAP_PF_P)                                                            /* Not present */
             || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
+        && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
         && !CPUMIsGuestInLongModeEx(pCtx)
         && !CPUMGetGuestCPL(pVCpu)
…
     return VINF_SUCCESS;
 }
-
 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */

-
 /** @} */
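Worth noting in the HMSVMR0.cpp diff: hmR0SvmRunGuest() and hmR0SvmRunGuestNested() differed only in which VMCB physical address they handed to the VMRUN stub, so the two wrappers collapse into one that takes the address as a parameter. A simplified sketch of that fold, with flattened field paths and printf standing in for the real assembly VMRUN call:

    #include <stdio.h>

    /* Simplified stand-in types; the real ones nest these fields under
     * pVCpu->hm.s.svm.HCPhysVmcb and pCtx->hwvirt.svm.HCPhysVmcb. */
    typedef unsigned long long RTHCPHYS;
    typedef struct CPUMCTX { RTHCPHYS HCPhysNstGstVmcb; } CPUMCTX, *PCPUMCTX;
    typedef struct VMCPU   { RTHCPHYS HCPhysVmcb; }       VMCPU,   *PVMCPU;

    /* One wrapper instead of two: the VMCB address is now an argument. */
    static int hmR0SvmRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, RTHCPHYS HCPhysVmcb)
    {
        (void)pVCpu; (void)pCtx;
        printf("VMRUN with VMCB at %#llx\n", HCPhysVmcb);
        return 0;
    }

    int main(void)
    {
        VMCPU   VCpu = { 0x1000 };
        CPUMCTX Ctx  = { 0x2000 };
        hmR0SvmRunGuest(&VCpu, &Ctx, VCpu.HCPhysVmcb);      /* normal guest VMCB */
        hmR0SvmRunGuest(&VCpu, &Ctx, Ctx.HCPhysNstGstVmcb); /* nested-guest VMCB */
        return 0;
    }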
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
(r72744 → r72805)

 VMMR0DECL(int) SVMR0GlobalInit(void);
 VMMR0DECL(void) SVMR0GlobalTerm(void);
-VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
+VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu);
 VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
-VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
+VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
                               bool fEnabledBySystem, void *pvArg);
-VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
+VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 VMMR0DECL(int) SVMR0InitVM(PVM pVM);
 VMMR0DECL(int) SVMR0TermVM(PVM pVM);
 VMMR0DECL(int) SVMR0SetupVM(PVM pVM);
-VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx);
 VMMR0DECL(int) SVMR0ExportHostState(PVMCPU pVCpu);
 VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);
-VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
+VMMR0DECL(int) SVMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);

 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
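One asymmetry the header makes visible: SVMR0EnableCpu() keeps its PVM parameter while the other entry points lose theirs, because it is documented as callable with a NULL pVM after a host resume, so there is no VCPU to derive the VM from and every use must be NULL-guarded. A tiny sketch of that guard, with a simplified stand-in type (the real flag is pVM->hm.s.svm.fIgnoreInUseError):

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified stand-in for the real VM structure. */
    typedef struct VM { bool fIgnoreInUseError; } VM, *PVM;

    /* pVM may legitimately be NULL when re-enabling AMD-V after a host
     * resume, so the flag is only honoured when a VM is actually around. */
    static bool svmShouldIgnoreInUseError(PVM pVM)
    {
        return pVM != NULL
            && pVM->fIgnoreInUseError;
    }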
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r72802 → r72805)

 *********************************************************************************************************************************/
 static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
-static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
+static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
 static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat);
…
                                  RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState);
 #if HC_ARCH_BITS == 32
-static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
+static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
 #endif
 #ifndef HMVMX_USE_FUNCTION_TABLE
…
 static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
-static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx);
…
  * updates VCPU's last error record as well.
  *
- * @param   pVM    The cross context VM structure.
  * @param   pVCpu  The cross context virtual CPU structure of the calling EMT.
  *                 Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
…
  * @param   rc     The error code.
  */
-static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
-{
-    AssertPtr(pVM);
+static void hmR0VmxUpdateErrorRecord(PVMCPU pVCpu, int rc)
+{
     if (   rc == VERR_VMX_INVALID_VMCS_FIELD
         || rc == VERR_VMX_UNABLE_TO_START_VM)
…
         VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
     }
-    pVM->hm.s.lLastError = rc;
+    pVCpu->CTX_SUFF(pVM)->hm.s.lLastError = rc;
 }
…
  *
  * @returns VBox status code.
- * @param   pCpu           Pointer to the global CPU info struct.
+ * @param   pHostCpu       Pointer to the global CPU info struct.
  * @param   pVM            The cross context VM structure. Can be
  *                         NULL after a host resume operation.
…
  * @param   pvMsrs         Opaque pointer to VMXMSRS struct.
  */
-VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
                               void *pvMsrs)
 {
-    Assert(pCpu);
+    Assert(pHostCpu);
     Assert(pvMsrs);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
     {
         hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
-        pCpu->fFlushAsidBeforeUse = false;
+        pHostCpu->fFlushAsidBeforeUse = false;
     }
     else
-        pCpu->fFlushAsidBeforeUse = true;
+        pHostCpu->fFlushAsidBeforeUse = true;

     /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
-    ++pCpu->cTlbFlushes;
+    ++pHostCpu->cTlbFlushes;

     return VINF_SUCCESS;
…
  *
  * @returns VBox status code.
- * @param   pCpu           Pointer to the global CPU info struct.
+ * @param   pHostCpu       Pointer to the global CPU info struct.
  * @param   pvCpuPage      Pointer to the VMXON region.
  * @param   HCPhysCpuPage  Physical address of the VMXON region.
…
  *          similar was used to enable VT-x on the host.
  */
-VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
-{
-    NOREF(pCpu);
-    NOREF(pvCpuPage);
-    NOREF(HCPhysCpuPage);
+VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+{
+    RT_NOREF3(pHostCpu, pvCpuPage, HCPhysCpuPage);

     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
  *
  * @returns VBox status code.
- * @param   pVM       The cross context VM structure.
  * @param   pVCpu     The cross context virtual CPU structure of the calling
  *                    EMT. Can be NULL depending on @a enmFlush.
…
  * @remarks Can be called with interrupts disabled.
  */
-static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
-{
-    NOREF(pVM);
-    AssertPtr(pVM);
-    Assert(pVM->hm.s.vmx.fVpid);
+static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
+{
+    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid);

     uint64_t au64Descriptor[2];
…
     }

-    int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]); NOREF(rc);
-    AssertMsg(rc == VINF_SUCCESS,
-              ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
+    int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]);
+    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmFlush,
+                                   pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
     if (   RT_SUCCESS(rc)
         && pVCpu)
     {
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
     }
+    NOREF(rc);
 }
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   GCVirt  Guest virtual address of the page to invalidate.
  */
-VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
-{
-    AssertPtr(pVM);
+VMMR0DECL(int) VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
+{
     AssertPtr(pVCpu);
-    LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
+    LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));

     bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
…
      * as this function maybe called in a loop with individual addresses.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (pVM->hm.s.vmx.fVpid)
     {
…
         if (fVpidFlush)
         {
-            hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
+            hmR0VmxFlushVpid(pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
         }
…
  * case where neither EPT nor VPID is supported by the CPU.
  *
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCpu    Pointer to the global HM struct.
  *
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+static void hmR0VmxFlushTaggedTlbNone(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
 {
     AssertPtr(pVCpu);
     AssertPtr(pCpu);
-    NOREF(pVM);

     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
…
  * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
  *
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCpu    Pointer to the global HM CPU struct.
- * @remarks All references to "ASID" in this function pertains to "VPID" in
- *          Intel's nomenclature. The reason is, to avoid confusion in compare
- *          statements since the host-CPU copies are named "ASID".
- *
- * @remarks Called with interrupts disabled.
- */
-static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+ *
+ * @remarks All references to "ASID" in this function pertains to "VPID" in Intel's
+ *          nomenclature. The reason is, to avoid confusion in compare statements
+ *          since the host-CPU copies are named "ASID".
+ *
+ * @remarks Called with interrupts disabled.
+ */
+static void hmR0VmxFlushTaggedTlbBoth(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
 {
 #ifdef VBOX_WITH_STATISTICS
…
 #endif

-    AssertPtr(pVM);
     AssertPtr(pCpu);
     AssertPtr(pVCpu);
     Assert(pCpu->idCpu != NIL_RTCPUID);

+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
               ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCpu    Pointer to the global HM CPU struct.
  *
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
-{
-    AssertPtr(pVM);
+static void hmR0VmxFlushTaggedTlbEpt(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+{
     AssertPtr(pVCpu);
     AssertPtr(pCpu);
     Assert(pCpu->idCpu != NIL_RTCPUID);
-    AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
-    AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
+    AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
+    AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));

     /*
…
     if (pVCpu->hm.s.fForceTLBFlush)
     {
-        hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+        hmR0VmxFlushEpt(pVCpu, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmFlushEpt);
         pVCpu->hm.s.fForceTLBFlush = false;
     }
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCpu    Pointer to the global HM CPU struct.
  *
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
-{
-    AssertPtr(pVM);
+static void hmR0VmxFlushTaggedTlbVpid(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+{
     AssertPtr(pVCpu);
     AssertPtr(pCpu);
     Assert(pCpu->idCpu != NIL_RTCPUID);
-    AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
-    AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
+    AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked without VPID."));
+    AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging"));

     /*
-     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
-     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
-     * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
+     * Force a TLB flush for the first world switch if the current CPU differs from the one we
+     * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
+     * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
+     * cannot reuse the current ASID anymore.
      */
     if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
…
     {
         /*
-         * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
-         * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
-         * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
+         * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
+         * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
+         * fExplicitFlush = true here and change the pCpu->fFlushAsidBeforeUse check below to
+         * include fExplicitFlush's too) - an obscure corner case.
          */
         pVCpu->hm.s.fForceTLBFlush = true;
…
     }

+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     pVCpu->hm.s.idLastCpu = pCpu->idCpu;
     if (pVCpu->hm.s.fForceTLBFlush)
…
     {
         if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
-            hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
+            hmR0VmxFlushVpid(pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
         else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
         {
-            hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
+            hmR0VmxFlushVpid(pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
             pCpu->fFlushAsidBeforeUse = false;
         }
…
     switch (pVM->hm.s.vmx.uFlushTaggedTlb)
     {
-        case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
-        case HMVMX_FLUSH_TAGGED_TLB_EPT:      hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu);  break;
-        case HMVMX_FLUSH_TAGGED_TLB_VPID:     hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
-        case HMVMX_FLUSH_TAGGED_TLB_NONE:     hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
+        case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVCpu, pCpu); break;
+        case HMVMX_FLUSH_TAGGED_TLB_EPT:      hmR0VmxFlushTaggedTlbEpt(pVCpu, pCpu);  break;
+        case HMVMX_FLUSH_TAGGED_TLB_VPID:     hmR0VmxFlushTaggedTlbVpid(pVCpu, pCpu); break;
+        case HMVMX_FLUSH_TAGGED_TLB_NONE:     hmR0VmxFlushTaggedTlbNone(pVCpu, pCpu); break;
         default:
             AssertMsgFailed(("Invalid flush-tag function identifier\n"));
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  */
-static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
-{
-    AssertPtr(pVM);
+static int hmR0VmxSetupPinCtls(PVMCPU pVCpu)
+{
     AssertPtr(pVCpu);

+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;  /* Bits set here must always be set. */
     uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;     /* Bits cleared here must always be cleared. */
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  */
-static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
-{
-    AssertPtr(pVM);
+static int hmR0VmxSetupProcCtls(PVMCPU pVCpu)
+{
     AssertPtr(pVCpu);

     int rc = VERR_INTERNAL_ERROR_5;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
     uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  */
-static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
-{
-    NOREF(pVM);
-    AssertPtr(pVM);
+static int hmR0VmxSetupMiscCtls(PVMCPU pVCpu)
+{
     AssertPtr(pVCpu);
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  */
-static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
-{
-    AssertPtr(pVM);
+static int hmR0VmxInitXcptBitmap(PVMCPU pVCpu)
+{
     AssertPtr(pVCpu);

-    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
-
-    uint32_t u32XcptBitmap = 0;
+    uint32_t u32XcptBitmap;

     /* Must always intercept #AC to prevent the guest from hanging the CPU. */
-    u32XcptBitmap |= RT_BIT_32(X86_XCPT_AC);
+    u32XcptBitmap = RT_BIT_32(X86_XCPT_AC);

     /* Because we need to maintain the DR6 state even when intercepting DRx reads
…
     /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
-    if (!pVM->hm.s.fNestedPaging)
+    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
         u32XcptBitmap |= RT_BIT(X86_XCPT_PF);

-    pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
+    /* Commit it to the VMCS. */
     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
     AssertRCReturn(rc, rc);
-    return rc;
+
+    /* Update our cache of the exception bitmap. */
+    pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
+    return VINF_SUCCESS;
 }
…
         /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
         rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
-        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
-                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
+        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc\n", rc),
+                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);

         /* Load this VMCS as the current VMCS. */
         rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
-        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
-                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
-
-        rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
-        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
-                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
-
-        rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
-        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
-                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
-
-        rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
-        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
-                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
-
-        rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
-        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
-                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
+        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc\n", rc),
+                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
+
+        rc = hmR0VmxSetupPinCtls(pVCpu);
+        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc\n", rc),
+                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
+
+        rc = hmR0VmxSetupProcCtls(pVCpu);
+        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc\n", rc),
+                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
+
+        rc = hmR0VmxSetupMiscCtls(pVCpu);
+        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc\n", rc),
+                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
+
+        rc = hmR0VmxInitXcptBitmap(pVCpu);
+        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc\n", rc),
+                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);

 #if HC_ARCH_BITS == 32
-        rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
-        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
-                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
+        rc = hmR0VmxInitVmcsReadCache(pVCpu);
+        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc\n", rc),
+                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
 #endif

         /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
         rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
-        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
-                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
+        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc\n", rc),
+                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);

         pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;

-        hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
+        hmR0VmxUpdateErrorRecord(pVCpu, rc);
     }
…
     /*
-     * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
-     * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the
-     * limit as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU
-     * behavior. However, several hosts either insists on 0xfff being the limit (Windows
-     * Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there
-     * but botches sidt alignment in at least one consumer). So, we're only allowing the
-     * IDTR.LIMIT to be left at 0xffff on hosts where we are sure it won't cause trouble.
+     * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" and
+     * Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit
+     * as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU behavior.
+     * However, several hosts either insists on 0xfff being the limit (Windows Patch Guard) or
+     * uses the limit for other purposes (darwin puts the CPU ID in there but botches sidt
+     * alignment in at least one consumer). So, we're only allowing the IDTR.LIMIT to be left
+     * at 0xffff on hosts where we are sure it won't cause trouble.
      */
 # if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
…
     /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
        Let us assert it as such and use 32-bit VMWRITE. */
-    Assert(!(pMixedCtx->rflags.u64 >> 32));
+    Assert(!RT_HI_U32(pMixedCtx->rflags.u64));
     X86EFLAGS fEFlags = pMixedCtx->eflags;
     Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
…
         uCr0Mask &= ~X86_CR0_PE;
 #endif
-        /* Update the HMCPU's copy of the CR0 mask. */
-        pVCpu->hm.s.vmx.u32CR0Mask = uCR0Mask;
-
         /*
          * Finally, update VMCS fields with the CR0 values.
…
         int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, uGuestCR0);
         rc    |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, uShadowCR0);
-        rc    |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, uCR0Mask);
+        if (uCR0Mask != pVCpu->hm.s.vmx.u32CR0Mask)
+            rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, uCR0Mask);
         if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
             rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
         AssertRCReturn(rc, rc);
+
+        pVCpu->hm.s.vmx.u32CR0Mask  = uCR0Mask;
         pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
…
     {
         Assert(!RT_HI_U32(pMixedCtx->cr4));
-        uint32_t uGuestCR4 = pMixedCtx->cr4;
-
-        /* The guest's view of its CR4 is unblemished. */
-        rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uGuestCR4);
-        AssertRCReturn(rc, rc);
-        Log4Func(("uShadowCR4=%#RX32\n", uGuestCR4));
+        uint32_t       uGuestCR4  = pMixedCtx->cr4;
+        uint32_t const uShadowCR4 = pMixedCtx->cr4;

         /*
…
         uGuestCR4 &= fZapCR4;

-        /* Write VT-x's view of the guest CR4 into the VMCS. */
-        Log4Func(("uGuestCR4=%#RX32 (fSetCR4=%#RX32 fZapCR4=%#RX32)\n", uGuestCR4, fSetCR4, fZapCR4));
-        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4);
-        AssertRCReturn(rc, rc);
-
         /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM-exit. */
         uint32_t u32CR4Mask = X86_CR4_VME
…
         if (pVM->cpum.ro.GuestFeatures.fPcid)
             u32CR4Mask |= X86_CR4_PCIDE;
+
+        /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow into the VMCS. */
+        rc  = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4);
+        rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uShadowCR4);
+        if (pVCpu->hm.s.vmx.u32CR4Mask != u32CR4Mask)
+            rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
+        AssertRCReturn(rc, rc);
         pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
-        rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
-        AssertRCReturn(rc, rc);

         /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
…
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
+
+        Log4Func(("uGuestCR4=%#RX32 uShadowCR4=%#RX32 (fSetCR4=%#RX32 fZapCR4=%#RX32)\n", uGuestCR4, uShadowCR4, fSetCR4,
+                  fZapCR4));
     }
     return rc;
…
  * Strict function to validate segment registers.
  *
+ * @param   pVCpu  The cross context virtual CPU structure.
+ * @param   pCtx   Pointer to the guest-CPU context.
+ *
  * @remarks Will import guest CR0 on strict builds during validation of
  *          segments.
  */
-static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx)
+static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pCtx)
 {
     /*
…
      * and doesn't change the guest-context value.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
     if (   !pVM->hm.s.vmx.fUnrestrictedGuest
…
         /* 64-bit capable CPUs. */
 # if HC_ARCH_BITS == 64
-        Assert(!(pCtx->cs.u64Base >> 32));
-        Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
-        Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
-        Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
+        Assert(!RT_HI_U32(pCtx->cs.u64Base));
+        Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
+        Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
+        Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
 # endif
     }
…
         /* 64-bit capable CPUs. */
 # if HC_ARCH_BITS == 64
-        Assert(!(pCtx->cs.u64Base >> 32));
-        Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
-        Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
-        Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
+        Assert(!RT_HI_U32(pCtx->cs.u64Base));
+        Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
+        Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
+        Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
 # endif
     }
…
 #ifdef VBOX_STRICT
-        /* Validate. */
-        hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
+        hmR0VmxValidateSegmentRegs(pVCpu, pMixedCtx);
 #endif
…
     {
         /*
-         * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
-         * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
-         * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
+         * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
+         * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
+         * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
          */
         uint16_t u16Sel = 0;
…
  *
  * @returns VBox status code, no informational status codes.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCtx    Pointer to the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
…
     bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
     /** @todo Add stats for resume vs launch. */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 #ifdef VBOX_WITH_KERNEL_USING_XMM
     int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
…
  * Reports world-switch error and dumps some useful debug info.
  *
- * @param   pVM            The cross context VM structure.
  * @param   pVCpu          The cross context virtual CPU structure.
  * @param   rcVMRun        The return code from VMLAUNCH/VMRESUME.
…
  *                         exitReason updated).
  */
-static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
-{
-    Assert(pVM);
+static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
+{
     Assert(pVCpu);
     Assert(pCtx);
…
                 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
                 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
-                if (pVM->hm.s.fNestedPaging)
+                if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
                 {
                     rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
…
                 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
                 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
-                if (pVM->hm.s.vmx.fVpid)
+                if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid)
                 {
                     rc = VMXReadVmcs32(VMX_VMCS16_VPID, &u32Val); AssertRC(rc);
…
             break;
     }
-    NOREF(pVM); NOREF(pCtx);
+    NOREF(pCtx);
 }
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  */
-static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
-{
-#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField)        \
-    {                                                           \
-        Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
-        pCache->Read.aField[idxField##_CACHE_IDX] = idxField;   \
-        pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0;       \
-        ++cReadFields;                                          \
-    }
-
-    AssertPtr(pVM);
-    AssertPtr(pVCpu);
+static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu)
+{
+#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField)            \
+    do {                                                            \
+        Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0);     \
+        pCache->Read.aField[idxField##_CACHE_IDX] = idxField;       \
+        pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0;           \
+        ++cReadFields;                                              \
+    } while (0)
+
     PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
     uint32_t cReadFields = 0;
…
 #endif

-    if (pVM->hm.s.fNestedPaging)
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
     {
         VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
…
  *
  * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
  * @param   pVCpu   The cross context virtual CPU structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu)
+static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
 {
     int rc;
     bool fOffsettedTsc;
     bool fParavirtTsc;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (pVM->hm.s.vmx.fUsePreemptTimer)
     {
…
     {
         uint32_t u32Shadow;
-        /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */
         if (fWhat & CPUMCTX_EXTRN_CR0)
         {
…
         }

-        /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). */
         if (fWhat & CPUMCTX_EXTRN_CR4)
         {
…
         if (fWhat & CPUMCTX_EXTRN_CR3)
         {
+            /* CR0.PG bit changes are always intercepted, so it's up to date.
*/ 6393 6373 if ( pVM->hm.s.vmx.fUnrestrictedGuest 6394 6374 || ( pVM->hm.s.fNestedPaging 6395 && CPUMIsGuestPagingEnabledEx(pCtx))) /* PG bit changes are always intercepted, so it's up to date. */6375 && CPUMIsGuestPagingEnabledEx(pCtx))) 6396 6376 { 6397 6377 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val); … … 6402 6382 } 6403 6383 6404 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */ 6384 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. 6385 Note: CR4.PAE, CR0.PG, EFER bit changes are always intercepted, so they're up to date. */ 6405 6386 if (CPUMIsGuestInPAEModeEx(pCtx)) 6406 6387 { … … 6501 6482 * to the EM loop. 6502 6483 * 6503 * @param pVM The cross context VM structure.6504 6484 * @param pVCpu The cross context virtual CPU structure. 6505 6485 * @param pMixedCtx Pointer to the guest-CPU context. The data may be … … 6508 6488 * @param fStepping Running in hmR0VmxRunGuestCodeStep(). 6509 6489 */ 6510 static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)6490 static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping) 6511 6491 { 6512 6492 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 6493 6494 /* Pending HM CR3 sync. */ 6495 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 6496 { 6497 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_CR3)); 6498 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu)); 6499 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3, 6500 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS); 6501 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)); 6502 } 6503 6504 /* Pending HM PAE PDPEs. */ 6505 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)) 6506 { 6507 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); 6508 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 6509 } 6513 6510 6514 6511 /* 6515 6512 * Anything pending? Should be more likely than not if we're doing a good job. 6516 6513 */ 6514 PVM pVM = pVCpu->CTX_SUFF(pVM); 6517 6515 if ( !fStepping 6518 6516 ? !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_MASK) … … 6521 6519 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) ) 6522 6520 return VINF_SUCCESS; 6523 6524 #if 06525 /* We need the control registers now, make sure the guest-CPU context is updated. */6526 int rc3 = hmR0VmxImportGuestStatae(pVCpu, CPUMCTX_EXTRN_CR0);6527 AssertRCReturn(rc3, rc3);6528 6529 /** @todo r=ramshankar: VMCPU_FF_HM_UPDATE_CR3 and VMCPU_FF_HM_UPDATE_PAE_PDPES6530 * are not part of VMCPU_FF_HP_R0_PRE_HM_MASK. Hence, the two if6531 * statements below won't ever be entered. Consider removing it or6532 * determine if it is necessary to add these flags to VMCPU_FF_HP_R0_PRE_HM_MASK. */6533 /* Pending HM CR3 sync. */6534 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))6535 {6536 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);6537 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,6538 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);6539 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));6540 }6541 6542 /* Pending HM PAE PDPEs. */6543 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))6544 {6545 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);6546 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));6547 }6548 #endif6549 6521 6550 6522 /* Pending PGM C3 sync. 
*/ … … 6813 6785 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState); 6814 6786 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState); 6815 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.Stat Exit1);6816 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit 2);6787 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit); 6788 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling); 6817 6789 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO); 6818 6790 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx); … … 6915 6887 * 6916 6888 * @returns VBox status code. 6917 * @param pVM The cross context VM structure.6918 6889 * @param pVCpu The cross context virtual CPU structure. 6919 6890 * @param pMixedCtx Pointer to the guest-CPU context. The data may be … … 6923 6894 * VINF_VMM_UNKNOWN_RING3_CALL. 6924 6895 */ 6925 static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, VBOXSTRICTRC rcExit) 6926 { 6927 Assert(pVM); 6896 static int hmR0VmxExitToRing3(PVMCPU pVCpu, PCPUMCTX pMixedCtx, VBOXSTRICTRC rcExit) 6897 { 6928 6898 Assert(pVCpu); 6929 6899 Assert(pMixedCtx); … … 6973 6943 | CPUM_CHANGED_TR 6974 6944 | CPUM_CHANGED_HIDDEN_SEL_REGS); 6975 if ( pV M->hm.s.fNestedPaging6945 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging 6976 6946 && CPUMIsGuestPagingEnabledEx(pMixedCtx)) 6977 6947 { … … 7314 7284 } 7315 7285 #endif 7316 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%# x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,7317 (uint8_t)uIntType));7286 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo, 7287 uIntType)); 7318 7288 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr, 7319 7289 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, … … 7583 7553 * directly (register modified by us, not by 7584 7554 * hardware on VM-entry). 7585 *7586 * @remarks Requires CR0!7587 7555 */ 7588 7556 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode, … … 7590 7558 { 7591 7559 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */ 7592 AssertMsg( u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));7560 AssertMsg(!RT_HI_U32(u64IntInfo), ("%#RX64\n", u64IntInfo)); 7593 7561 Assert(pfIntrState); 7594 7562 … … 7601 7569 /* 7602 7570 * Validate the error-code-valid bit for hardware exceptions. 7603 * No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" 7571 * No error codes for exceptions in real-mode. 7572 * 7573 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" 7604 7574 */ 7605 7575 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT … … 7630 7600 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]); 7631 7601 7632 /* We require CR0 to check if the guest is in real-mode. */7633 /** @todo No we don't, since CR0.PE is always intercepted. */7634 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);7635 AssertRCReturn(rc, rc);7636 7637 7602 /* 7638 7603 * Hardware interrupts & exceptions cannot be delivered through the software interrupt … … 7643 7608 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling. 
7644 7609 */ 7645 if (CPUMIsGuestInRealModeEx(pMixedCtx)) 7610 if (CPUMIsGuestInRealModeEx(pMixedCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */ 7646 7611 { 7647 7612 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest) 7648 7613 { 7649 7614 /* 7650 * For unrestricted execution enabled CPUs running real-mode guests, we must not .7651 * set the deliver-error-code bit 7652 * .7615 * For unrestricted execution enabled CPUs running real-mode guests, we must not 7616 * set the deliver-error-code bit. 7617 * 7653 7618 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields". 7654 7619 */ … … 7662 7627 7663 7628 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */ 7664 rc= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK7665 | CPUMCTX_EXTRN_TABLE_MASK7666 | CPUMCTX_EXTRN_RIP7667 | CPUMCTX_EXTRN_RSP7668 | CPUMCTX_EXTRN_RFLAGS);7669 AssertRCReturn(rc , rc);7629 int rc2 = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK 7630 | CPUMCTX_EXTRN_TABLE_MASK 7631 | CPUMCTX_EXTRN_RIP 7632 | CPUMCTX_EXTRN_RSP 7633 | CPUMCTX_EXTRN_RFLAGS); 7634 AssertRCReturn(rc2, rc2); 7670 7635 7671 7636 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */ … … 7681 7646 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, pfIntrState); 7682 7647 7683 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */ 7684 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */ 7685 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, 7686 fStepping, pfIntrState); 7648 /* 7649 * If we're injecting an event with no valid IDT entry, inject a #GP. 7650 * No error codes for exceptions in real-mode. 7651 * 7652 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" 7653 */ 7654 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping, 7655 pfIntrState); 7687 7656 } 7688 7657 … … 7701 7670 X86IDTR16 IdtEntry; 7702 7671 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry; 7703 rc = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);7704 AssertRCReturn(rc , rc);7672 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry); 7673 AssertRCReturn(rc2, rc2); 7705 7674 7706 7675 /* Construct the stack frame for the interrupt/exception handler. */ … … 7763 7732 7764 7733 /* Inject. */ 7765 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);7734 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo); 7766 7735 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo)) 7767 7736 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode); … … 7769 7738 AssertRCReturn(rc, rc); 7770 7739 7740 /* Update CR2. */ 7771 7741 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT 7772 7742 && uVector == X86_XCPT_PF) 7773 7743 pMixedCtx->cr2 = GCPtrFaultAddress; 7774 7744 7775 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, 7776 pMixedCtx->cr2)); 7745 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2)); 7777 7746 7778 7747 return VINF_SUCCESS; … … 7811 7780 * 7812 7781 * @returns VBox status code. 
7813 * @param pVM The cross context VM structure.7814 7782 * @param pVCpu The cross context virtual CPU structure. 7815 * @param pCpu Pointer to the CPU info struct. 7816 */ 7817 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu) 7818 { 7819 AssertPtr(pVM); 7783 * @param pHostCpu Pointer to the global CPU info struct. 7784 */ 7785 VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu) 7786 { 7820 7787 AssertPtr(pVCpu); 7821 Assert(pV M->hm.s.vmx.fSupported);7788 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported); 7822 7789 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 7823 NOREF(pCpu); NOREF(pVM);7824 7825 LogFlowFunc(("pV M=%p pVCpu=%p\n", pVM, pVCpu));7790 RT_NOREF(pHostCpu); 7791 7792 LogFlowFunc(("pVCpu=%p\n", pVCpu)); 7826 7793 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 7827 7794 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)); … … 7912 7879 /* Initialize the bare minimum state required for HM. This takes care of 7913 7880 initializing VT-x if necessary (onlined CPUs, local init etc.) */ 7914 int rc = HMR0EnterCpu(pVCpu);7881 int rc = hmR0EnterCpu(pVCpu); 7915 7882 AssertRC(rc); 7916 7883 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) … … 8009 7976 * mapped (e.g. EFI32). 8010 7977 * 8011 * @param pVM The cross context VM structure.8012 7978 * @param pVCpu The cross context virtual CPU structure. 8013 7979 * @param pMixedCtx Pointer to the guest-CPU context. The data may be … … 8017 7983 * @remarks No-long-jump zone!!! 8018 7984 */ 8019 static VBOXSTRICTRC hmR0VmxExportGuestState(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 8020 { 8021 AssertPtr(pVM); 7985 static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 7986 { 8022 7987 AssertPtr(pVCpu); 8023 7988 AssertPtr(pMixedCtx); 8024 7989 HMVMX_ASSERT_PREEMPT_SAFE(); 8025 7990 8026 LogFlowFunc(("pV M=%p pVCpu=%p\n", pVM, pVCpu));7991 LogFlowFunc(("pVCpu=%p\n", pVCpu)); 8027 7992 8028 7993 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x); … … 8030 7995 /* Determine real-on-v86 mode. */ 8031 7996 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false; 8032 if ( !pV M->hm.s.vmx.fUnrestrictedGuest8033 && CPUMIsGuestInRealModeEx(pMixedCtx))7997 if ( !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest 7998 && CPUMIsGuestInRealModeEx(pMixedCtx)) 8034 7999 { 8035 8000 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true; … … 8108 8073 * Exports the state shared between the host and guest into the VMCS. 8109 8074 * 8110 * @param pVM The cross context VM structure.8111 8075 * @param pVCpu The cross context virtual CPU structure. 8112 8076 * @param pCtx Pointer to the guest-CPU context. … … 8114 8078 * @remarks No-long-jump zone!!! 8115 8079 */ 8116 static void hmR0VmxExportSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 8117 { 8118 NOREF(pVM); 8119 8080 static void hmR0VmxExportSharedState(PVMCPU pVCpu, PCPUMCTX pCtx) 8081 { 8120 8082 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 8121 8083 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); … … 8154 8116 * mapped (e.g. EFI32). 8155 8117 * 8156 * @param pVM The cross context VM structure.8157 8118 * @param pVCpu The cross context virtual CPU structure. 8158 8119 * @param pMixedCtx Pointer to the guest-CPU context. The data may be … … 8162 8123 * @remarks No-long-jump zone!!! 
8163 8124 */ 8164 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx)8125 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 8165 8126 { 8166 8127 HMVMX_ASSERT_PREEMPT_SAFE(); … … 8189 8150 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 8190 8151 { 8191 rcStrict = hmR0VmxExportGuestState(pV M, pVCpu, pMixedCtx);8152 rcStrict = hmR0VmxExportGuestState(pVCpu, pMixedCtx); 8192 8153 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 8193 8154 { /* likely */} … … 8236 8197 * @retval VINF_* scheduling changes, we have to go back to ring-3. 8237 8198 * 8238 * @param pVM The cross context VM structure.8239 8199 * @param pVCpu The cross context virtual CPU structure. 8240 8200 * @param pMixedCtx Pointer to the guest-CPU context. The data may be … … 8247 8207 * dispatching took place. 8248 8208 */ 8249 static VBOXSTRICTRC hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)8209 static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping) 8250 8210 { 8251 8211 Assert(VMMRZCallRing3IsEnabled(pVCpu)); … … 8256 8216 8257 8217 /* Check force flag actions that might require us to go back to ring-3. */ 8258 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pV M, pVCpu, pMixedCtx, fStepping);8218 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, pMixedCtx, fStepping); 8259 8219 if (rcStrict == VINF_SUCCESS) 8260 8220 { /* FFs doesn't get set all the time. */ } … … 8270 8230 * This is the reason we do it here and not in hmR0VmxExportGuestState(). 8271 8231 */ 8232 PVM pVM = pVCpu->CTX_SUFF(pVM); 8272 8233 if ( !pVCpu->hm.s.vmx.u64MsrApicBase 8273 8234 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) … … 8298 8259 8299 8260 /* 8300 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with 8301 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM. 8261 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus 8262 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might 8263 * also result in triple-faulting the VM. 8302 8264 */ 8303 8265 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fIntrState, fStepping); … … 8329 8291 * Hence, loading of the guest state needs to be done -after- injection of events. 8330 8292 */ 8331 rcStrict = hmR0VmxExportGuestStateOptimal(pV M, pVCpu, pMixedCtx);8293 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pMixedCtx); 8332 8294 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 8333 8295 { /* likely */ } … … 8339 8301 8340 8302 /* 8341 * We disable interrupts so that we don't miss any interrupts that would flag 8342 * preemption (IPI/timers etc.) when thread-context hooks aren't used and we've8343 * been running with preemption disabled for a while. Since this is purly to aid8344 * the RTThreadPreemptIsPending code, it doesn't matter that it may temporarily8345 * reenable anddisable interrupt on NT.8303 * We disable interrupts so that we don't miss any interrupts that would flag preemption 8304 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with 8305 * preemption disabled for a while. Since this is purly to aid the 8306 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and 8307 * disable interrupt on NT. 
8346 8308 * 8347 * We need to check for force-flags that could've possible been altered since we last checked them (e.g. 8348 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}). 8309 * We need to check for force-flags that could've possible been altered since we last 8310 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section, 8311 * see @bugref{6398}). 8349 8312 * 8350 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before8351 * executing guest code.8313 * We also check a couple of other force-flags as a last opportunity to get the EMT back 8314 * to ring-3 before executing guest code. 8352 8315 */ 8353 8316 pVmxTransient->fEFlags = ASMIntDisableFlags(); … … 8392 8355 * point. 8393 8356 * 8394 * @param pVM The cross context VM structure.8395 8357 * @param pVCpu The cross context virtual CPU structure. 8396 8358 * @param pMixedCtx Pointer to the guest-CPU context. The data may be … … 8402 8364 * @remarks No-long-jump zone!!! 8403 8365 */ 8404 static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)8366 static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 8405 8367 { 8406 8368 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); … … 8414 8376 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); 8415 8377 8378 PVM pVM = pVCpu->CTX_SUFF(pVM); 8416 8379 if (!CPUMIsGuestFPUStateActive(pVCpu)) 8417 8380 { … … 8449 8412 */ 8450 8413 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE) 8451 hmR0VmxExportSharedState(pV M, pVCpu, pMixedCtx);8414 hmR0VmxExportSharedState(pVCpu, pMixedCtx); 8452 8415 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged)); 8453 8416 … … 8477 8440 || idCurrentCpu != pVCpu->hm.s.idLastCpu) 8478 8441 { 8479 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pV M, pVCpu);8442 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu); 8480 8443 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false; 8481 8444 } … … 8532 8495 #endif 8533 8496 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE 8534 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pV M, pVCpu, pMixedCtx);8497 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx); 8535 8498 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND) 8536 8499 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason)); … … 8567 8530 TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVCpu->hm.s.vmx.u64TSCOffset); 8568 8531 8569 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.Stat Exit1, x);8532 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x); 8570 8533 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */ 8571 8534 Assert(!ASMIntAreEnabled()); … … 8664 8627 * 8665 8628 * @returns VBox status code. 8666 * @param pVM The cross context VM structure.8667 8629 * @param pVCpu The cross context virtual CPU structure. 8668 8630 * @param pCtx Pointer to the guest-CPU context. … … 8670 8632 * @note Mostly the same as hmR0VmxRunGuestCodeStep(). 8671 8633 */ 8672 static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)8634 static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx) 8673 8635 { 8674 8636 VMXTRANSIENT VmxTransient; … … 8685 8647 to ring-3. This bugger disables interrupts on VINF_SUCCESS! 
*/ 8686 8648 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 8687 rcStrict = hmR0VmxPreRunGuest(pV M, pVCpu, pCtx, &VmxTransient, false /* fStepping */);8649 rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, false /* fStepping */); 8688 8650 if (rcStrict != VINF_SUCCESS) 8689 8651 break; 8690 8652 8691 hmR0VmxPreRunGuestCommitted(pV M, pVCpu, pCtx, &VmxTransient);8692 int rcRun = hmR0VmxRunGuest(pV M, pVCpu, pCtx);8653 hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient); 8654 int rcRun = hmR0VmxRunGuest(pVCpu, pCtx); 8693 8655 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */ 8694 8656 … … 8702 8664 else 8703 8665 { 8704 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.Stat Exit1, x);8705 hmR0VmxReportWorldSwitchError(pV M, pVCpu, rcRun, pCtx, &VmxTransient);8666 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x); 8667 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient); 8706 8668 return rcRun; 8707 8669 } … … 8711 8673 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); 8712 8674 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]); 8713 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.Stat Exit1, &pVCpu->hm.s.StatExit2, x);8675 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x); 8714 8676 HMVMX_START_EXIT_DISPATCH_PROF(); 8715 8677 … … 8722 8684 rcStrict = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason); 8723 8685 #endif 8724 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit 2, x);8686 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); 8725 8687 if (rcStrict == VINF_SUCCESS) 8726 8688 { 8727 if (cLoops <= pV M->hm.s.cMaxResumeLoops)8689 if (cLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops) 8728 8690 continue; /* likely */ 8729 8691 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops); … … 9229 9191 * 9230 9192 * @returns Strict VBox status code (i.e. informational status codes too). 9231 * @param pVM The cross context VM structure.9232 9193 * @param pVCpu The cross context virtual CPU structure. 9233 9194 * @param pMixedCtx Pointer to the guest-CPU context. … … 9238 9199 * and to the point. No longer than 33 chars long, please. 9239 9200 */ 9240 static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx,9241 PVMXTRANSIENT pVmxTransient,uint32_t uExitReason)9201 static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, 9202 uint32_t uExitReason) 9242 9203 { 9243 9204 /* … … 9586 9547 * one, in order to avoid event nesting. 9587 9548 */ 9549 PVM pVM = pVCpu->CTX_SUFF(pVM); 9588 9550 if ( enmEvent1 != DBGFEVENT_END 9589 9551 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1)) … … 9613 9575 * 9614 9576 * @returns Strict VBox status code (i.e. informational status codes too). 9615 * @param pVM The cross context VM structure.9616 9577 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 9617 9578 * @param pMixedCtx Pointer to the guest-CPU context. The data may be … … 9622 9583 * @param pDbgState The debug state. 
9623 9584 */ 9624 DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,9585 DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, 9625 9586 uint32_t uExitReason, PVMXRUNDBGSTATE pDbgState) 9626 9587 { … … 9748 9709 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) ) 9749 9710 { 9750 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pV M, pVCpu, pMixedCtx, pVmxTransient, uExitReason);9711 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pMixedCtx, pVmxTransient, uExitReason); 9751 9712 if (rcStrict != VINF_SUCCESS) 9752 9713 return rcStrict; … … 9768 9729 * 9769 9730 * @returns Strict VBox status code (i.e. informational status codes too). 9770 * @param pVM The cross context VM structure.9771 9731 * @param pVCpu The cross context virtual CPU structure. 9772 9732 * @param pCtx Pointer to the guest-CPU context. … … 9774 9734 * @note Mostly the same as hmR0VmxRunGuestCodeNormal(). 9775 9735 */ 9776 static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)9736 static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu, PCPUMCTX pCtx) 9777 9737 { 9778 9738 VMXTRANSIENT VmxTransient; … … 9806 9766 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 9807 9767 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Set up execute controls the next to can respond to. */ 9808 rcStrict = hmR0VmxPreRunGuest(pV M, pVCpu, pCtx, &VmxTransient, fStepping);9768 rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, fStepping); 9809 9769 if (rcStrict != VINF_SUCCESS) 9810 9770 break; 9811 9771 9812 hmR0VmxPreRunGuestCommitted(pV M, pVCpu, pCtx, &VmxTransient);9772 hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient); 9813 9773 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */ 9814 9774 … … 9816 9776 * Now we can run the guest code. 9817 9777 */ 9818 int rcRun = hmR0VmxRunGuest(pV M, pVCpu, pCtx);9778 int rcRun = hmR0VmxRunGuest(pVCpu, pCtx); 9819 9779 9820 9780 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */ … … 9831 9791 else 9832 9792 { 9833 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.Stat Exit1, x);9834 hmR0VmxReportWorldSwitchError(pV M, pVCpu, rcRun, pCtx, &VmxTransient);9793 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x); 9794 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient); 9835 9795 return rcRun; 9836 9796 } … … 9840 9800 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); 9841 9801 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]); 9842 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.Stat Exit1, &pVCpu->hm.s.StatExit2, x);9802 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x); 9843 9803 HMVMX_START_EXIT_DISPATCH_PROF(); 9844 9804 … … 9848 9808 * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitDebug(). 
9849 9809 */ 9850 rcStrict = hmR0VmxRunDebugHandleExit(pV M, pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState);9851 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit 2, x);9810 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState); 9811 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); 9852 9812 if (rcStrict != VINF_SUCCESS) 9853 9813 break; 9854 if (cLoops > pV M->hm.s.cMaxResumeLoops)9814 if (cLoops > pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops) 9855 9815 { 9856 9816 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops); … … 10058 10018 * 10059 10019 * @returns Strict VBox status code (i.e. informational status codes too). 10060 * @param pVM The cross context VM structure.10061 10020 * @param pVCpu The cross context virtual CPU structure. 10062 10021 * @param pCtx Pointer to the guest-CPU context. 10063 10022 */ 10064 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)10023 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx) 10065 10024 { 10066 10025 Assert(VMMRZCallRing3IsEnabled(pVCpu)); … … 10074 10033 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled()) 10075 10034 && !DBGFIsStepping(pVCpu) 10076 && !pV M->dbgf.ro.cEnabledInt3Breakpoints)10077 rcStrict = hmR0VmxRunGuestCodeNormal(pV M, pVCpu, pCtx);10035 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints) 10036 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, pCtx); 10078 10037 else 10079 rcStrict = hmR0VmxRunGuestCodeDebug(pV M, pVCpu, pCtx);10038 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, pCtx); 10080 10039 10081 10040 if (rcStrict == VERR_EM_INTERPRETER) … … 10084 10043 rcStrict = VINF_EM_TRIPLE_FAULT; 10085 10044 10086 int rc2 = hmR0VmxExitToRing3(pV M, pVCpu, pCtx, rcStrict);10045 int rc2 = hmR0VmxExitToRing3(pVCpu, pCtx, rcStrict); 10087 10046 if (RT_FAILURE(rc2)) 10088 10047 { … … 10297 10256 * wrong with the guest state. 10298 10257 * 10299 * @param pVM The cross context VM structure.10300 10258 * @param pVCpu The cross context virtual CPU structure. 10301 10259 * @param pCtx Pointer to the guest-CPU state. … … 10304 10262 * are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded. 10305 10263 */ 10306 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)10264 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx) 10307 10265 { 10308 10266 #define HMVMX_ERROR_BREAK(err) { uError = (err); break; } … … 10312 10270 } else do { } while (0) 10313 10271 10314 int rc; 10315 uint32_t uError = VMX_IGS_ERROR; 10316 uint32_t u32Val; 10317 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest; 10272 int rc; 10273 PVM pVM = pVCpu->CTX_SUFF(pVM); 10274 uint32_t uError = VMX_IGS_ERROR; 10275 uint32_t u32Val; 10276 bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest; 10318 10277 10319 10278 do … … 10628 10587 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE) 10629 10588 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL); 10630 HMVMX_CHECK_BREAK(! (pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);10631 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || ! (pCtx->ss.u64Base >> 32),10589 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID); 10590 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base), 10632 10591 VMX_IGS_LONGMODE_SS_BASE_INVALID); 10633 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || ! 
(pCtx->ds.u64Base >> 32),10592 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base), 10634 10593 VMX_IGS_LONGMODE_DS_BASE_INVALID); 10635 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || ! (pCtx->es.u64Base >> 32),10594 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base), 10636 10595 VMX_IGS_LONGMODE_ES_BASE_INVALID); 10637 10596 #endif … … 10684 10643 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE) 10685 10644 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL); 10686 HMVMX_CHECK_BREAK(! (pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);10687 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || ! (pCtx->ss.u64Base >> 32),10645 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID); 10646 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base), 10688 10647 VMX_IGS_LONGMODE_SS_BASE_INVALID); 10689 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || ! (pCtx->ds.u64Base >> 32),10648 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base), 10690 10649 VMX_IGS_LONGMODE_DS_BASE_INVALID); 10691 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || ! (pCtx->es.u64Base >> 32),10650 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base), 10692 10651 VMX_IGS_LONGMODE_ES_BASE_INVALID); 10693 10652 #endif … … 11607 11566 AssertRCReturn(rc, rc); 11608 11567 11609 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu ->CTX_SUFF(pVM), pVCpu, pMixedCtx);11568 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx); 11610 11569 NOREF(uInvalidReason); 11611 11570 … … 11640 11599 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc); 11641 11600 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val)); 11601 11602 hmR0DumpRegs(pVCpu, pMixedCtx); 11642 11603 #else 11643 11604 NOREF(pVmxTransient); 11644 11605 #endif 11645 11606 11646 hmR0DumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);11647 11607 return VERR_VMX_INVALID_GUEST_STATE; 11648 11608 } -
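A recurring pattern in the HMVMXR0.cpp hunks above is to keep a shadow copy of the last value written to a VMCS control field (u32CR0Mask, u32CR4Mask, u32ProcCtls) and to skip the VMWRITE when nothing changed. A minimal standalone sketch of that caching idea, using deliberately toy types and a hypothetical vmwrite() in place of the real VMXWriteVmcs32():

#include <stdint.h>
#include <stdio.h>

static unsigned g_cVmWrites = 0;

/* Toy stand-in for VMXWriteVmcs32(); counts how often the "hardware" is touched. */
static int vmwrite(uint32_t idField, uint32_t uValue)
{
    (void)idField; (void)uValue;
    g_cVmWrites++;
    return 0; /* VINF_SUCCESS */
}

/* Per-VCPU shadow of the last CR0 mask written, mirroring u32CR0Mask. */
typedef struct TOYVCPU { uint32_t u32CR0Mask; } TOYVCPU;

static int exportCr0Mask(TOYVCPU *pVCpu, uint32_t uCR0Mask)
{
    int rc = 0;
    if (uCR0Mask != pVCpu->u32CR0Mask)   /* only touch the VMCS on change */
    {
        rc = vmwrite(0x6000 /* pretend: CR0 mask field */, uCR0Mask);
        if (rc == 0)
            pVCpu->u32CR0Mask = uCR0Mask;   /* refresh the shadow on success */
    }
    return rc;
}

int main(void)
{
    TOYVCPU VCpu = { 0 };
    exportCr0Mask(&VCpu, 0x8001);   /* first export: one write */
    exportCr0Mask(&VCpu, 0x8001);   /* unchanged: write skipped */
    exportCr0Mask(&VCpu, 0x8003);   /* changed: one write */
    printf("VMWRITEs issued: %u\n", g_cVmWrites);   /* prints 2 */
    return 0;
}

The point of the shadow is simply to keep redundant writes out of the per-entry hot path; the real code updates the cached value only after the combined write batch has been asserted successful.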
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r72744 r72805 29 29 #ifdef IN_RING0 30 30 31 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);31 VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu); 32 32 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit); 33 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO p Cpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,33 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, 34 34 bool fEnabledBySystem, void *pvMsrs); 35 VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO p Cpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);35 VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys); 36 36 VMMR0DECL(int) VMXR0GlobalInit(void); 37 37 VMMR0DECL(void) VMXR0GlobalTerm(void); … … 40 40 VMMR0DECL(int) VMXR0SetupVM(PVM pVM); 41 41 VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu); 42 VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);42 VMMR0DECL(int) VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt); 43 43 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat); 44 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);44 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx); 45 45 DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); 46 46 DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); -
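The prototype changes above give the shape of the whole changeset in miniature: the PVM parameter is dropped and callees recover the VM from the cross-context VCPU instead, via pVCpu->CTX_SUFF(pVM) in the .cpp hunks. A toy model of that refactor, with simplified stand-in types rather than the real VBox structures:

#include <stdio.h>

typedef struct VM    { int fNestedPaging; } VM;
typedef struct VMCPU { VM *pVM; unsigned idCpu; } VMCPU;   /* back-pointer to the owning VM */

/* Before: static int invalidatePage(VM *pVM, VMCPU *pVCpu, void *GCVirt);
 * After:  the VM is reachable through the VCPU, so one parameter goes away.
 * (invalidatePage is an illustrative name, not the real VMXR0InvalidatePage.) */
static int invalidatePage(VMCPU *pVCpu, void *GCVirt)
{
    VM *pVM = pVCpu->pVM;   /* stands in for pVCpu->CTX_SUFF(pVM) */
    (void)GCVirt;
    return pVM->fNestedPaging ? 1 : 0;
}

int main(void)
{
    VM    Vm   = { 1 };
    VMCPU VCpu = { &Vm, 0 };
    printf("nested paging: %d\n", invalidatePage(&VCpu, NULL));
    return 0;
}

Because the back-pointer makes the extra argument pure redundancy, every call site in the files that follow shrinks by one parameter with no behavioral change.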
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r72778 r72805 1237 1237 * Enter HM context. 1238 1238 */ 1239 rc = HMR0Enter(pV M, pVCpu);1239 rc = HMR0Enter(pVCpu); 1240 1240 if (RT_SUCCESS(rc)) 1241 1241 { -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r72785 r72805 140 140 EXIT_REASON(VMX_EXIT_INVPCID , 58, "INVPCID instruction."), 141 141 EXIT_REASON(VMX_EXIT_VMFUNC , 59, "VMFUNC instruction."), 142 EXIT_REASON(VMX_EXIT_ENCLS , 60, "ENCLS instru nction."),142 EXIT_REASON(VMX_EXIT_ENCLS , 60, "ENCLS instruction."), 143 143 EXIT_REASON(VMX_EXIT_RDSEED , 61, "RDSEED instruction."), 144 144 EXIT_REASON(VMX_EXIT_PML_FULL , 62, "Page-modification log full."), … … 258 258 EXIT_REASON(SVM_EXIT_INIT , 99, "Physical INIT signal (host)."), 259 259 EXIT_REASON(SVM_EXIT_VINTR , 100, "Virtual interrupt-window exit."), 260 EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE, 101, " Write to CR0 that changed any bits other than CR0.TS or CR0.MP."),261 EXIT_REASON(SVM_EXIT_IDTR_READ , 102, "Read IDTR "),262 EXIT_REASON(SVM_EXIT_GDTR_READ , 103, "Read GDTR "),260 EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE, 101, "Selective CR0 Write (to bits other than CR0.TS and CR0.MP)."), 261 EXIT_REASON(SVM_EXIT_IDTR_READ , 102, "Read IDTR."), 262 EXIT_REASON(SVM_EXIT_GDTR_READ , 103, "Read GDTR."), 263 263 EXIT_REASON(SVM_EXIT_LDTR_READ , 104, "Read LDTR."), 264 264 EXIT_REASON(SVM_EXIT_TR_READ , 105, "Read TR."), … … 280 280 EXIT_REASON(SVM_EXIT_INVLPG , 121, "INVLPG instruction."), 281 281 EXIT_REASON(SVM_EXIT_INVLPGA , 122, "INVLPGA instruction."), 282 EXIT_REASON(SVM_EXIT_IOIO , 123, "IN/OUT accessing protected port."),282 EXIT_REASON(SVM_EXIT_IOIO , 123, "IN/OUT/INS/OUTS instruction."), 283 283 EXIT_REASON(SVM_EXIT_MSR , 124, "RDMSR or WRMSR access to protected MSR."), 284 284 EXIT_REASON(SVM_EXIT_TASK_SWITCH , 125, "Task switch."), 285 EXIT_REASON(SVM_EXIT_FERR_FREEZE , 126, " Legacy FPU handling enabled; CPU frozen in an x87/mmx instr.waiting for interrupt."),285 EXIT_REASON(SVM_EXIT_FERR_FREEZE , 126, "FERR Freeze; CPU frozen in an x87/mmx instruction waiting for interrupt."), 286 286 EXIT_REASON(SVM_EXIT_SHUTDOWN , 127, "Shutdown."), 287 287 EXIT_REASON(SVM_EXIT_VMRUN , 128, "VMRUN instruction."), … … 883 883 "/PROF/CPU%d/HM/StatEntry", i); 884 884 AssertRC(rc); 885 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.Stat Exit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,886 "Profiling of VMXR0RunGuestCode exit part 1",885 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPreExit, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, 886 "Profiling of pre-exit processing after returning from GC", 887 887 "/PROF/CPU%d/HM/SwitchFromGC_1", i); 888 888 AssertRC(rc); 889 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit 2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,890 "Profiling of VMXR0RunGuestCode exit part 2",889 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitHandling, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, 890 "Profiling of exit handling (longjmps not included!)", 891 891 "/PROF/CPU%d/HM/SwitchFromGC_2", i); 892 892 AssertRC(rc); … … 958 958 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF, "/HM/CPU%d/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception."); 959 959 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk, "/HM/CPU%d/Exit/Trap/Gst/Other", "Other guest exceptions."); 960 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt, "/HM/CPU%d/Exit/Instr/Hlt", " Guest attempted to execute HLT.");961 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr, "/HM/CPU%d/Exit/Instr/Rdmsr", " Guest attempted to execute RDMSR.");962 HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr, "/HM/CPU%d/Exit/Instr/Wrmsr", " Guest attempted to execute WRMSR.");963 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait, "/HM/CPU%d/Exit/Instr/Mwait", " Guest attempted to execute 
MWAIT.");964 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor, "/HM/CPU%d/Exit/Instr/Monitor", " Guest attempted to execute MONITOR.");965 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite, "/HM/CPU%d/Exit/Instr/DR-Write", " Guest attempted to write a debug register.");966 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead, "/HM/CPU%d/Exit/Instr/DR-Read", " Guest attempted to read a debug register.");967 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR0", " Guest attempted to read CR0.");968 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR2", " Guest attempted to read CR2.");969 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR3", " Guest attempted to read CR3.");970 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR4", " Guest attempted to read CR4.");971 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR8", " Guest attempted to read CR8.");972 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR0", " Guest attempted to write CR0.");973 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR2", " Guest attempted to write CR2.");974 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR3", " Guest attempted to write CR3.");975 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR4", " Guest attempted to write CR4.");976 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR8", " Guest attempted to write CR8.");977 HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts, "/HM/CPU%d/Exit/Instr/CLTS", " Guest attempted to execute CLTS.");978 HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw, "/HM/CPU%d/Exit/Instr/LMSW", " Guest attempted to execute LMSW.");979 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli, "/HM/CPU%d/Exit/Instr/Cli", " Guest attempted to execute CLI.");980 HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti, "/HM/CPU%d/Exit/Instr/Sti", " Guest attempted to execute STI.");981 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf, "/HM/CPU%d/Exit/Instr/Pushf", " Guest attempted to execute PUSHF.");982 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf, "/HM/CPU%d/Exit/Instr/Popf", " Guest attempted to execute POPF.");983 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret, "/HM/CPU%d/Exit/Instr/Iret", " Guest attempted to execute IRET.");984 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt, "/HM/CPU%d/Exit/Instr/Int", " Guest attempted to execute INT.");985 HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess, "/HM/CPU%d/Exit/Instr/XdtrAccess", "G uest attempted to access descriptor table register (GDTR, IDTR, LDTR).");960 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt, "/HM/CPU%d/Exit/Instr/Hlt", "HLT instruction."); 961 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr, "/HM/CPU%d/Exit/Instr/Rdmsr", "RDMSR instruction."); 962 HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr, "/HM/CPU%d/Exit/Instr/Wrmsr", "WRMSR instruction."); 963 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait, "/HM/CPU%d/Exit/Instr/Mwait", "MWAIT instruction."); 964 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor, "/HM/CPU%d/Exit/Instr/Monitor", "MONITOR instruction."); 965 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite, "/HM/CPU%d/Exit/Instr/DR-Write", "Debug register write."); 966 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead, "/HM/CPU%d/Exit/Instr/DR-Read", "Debug register read."); 967 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR0", "CR0 read."); 968 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR2", "CR2 read."); 969 
HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR3", "CR3 read."); 970 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR4", "CR4 read."); 971 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR8", "CR8 read."); 972 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR0", "CR0 write."); 973 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR2", "CR2 write."); 974 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR3", "CR3 write."); 975 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR4", "CR4 write."); 976 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR8", "CR8 write."); 977 HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts, "/HM/CPU%d/Exit/Instr/CLTS", "CLTS instruction."); 978 HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw, "/HM/CPU%d/Exit/Instr/LMSW", "LMSW instruction."); 979 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli, "/HM/CPU%d/Exit/Instr/Cli", "CLI instruction."); 980 HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti, "/HM/CPU%d/Exit/Instr/Sti", "STI instruction."); 981 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf, "/HM/CPU%d/Exit/Instr/Pushf", "PUSHF instruction."); 982 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf, "/HM/CPU%d/Exit/Instr/Popf", "POPF instruction."); 983 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret, "/HM/CPU%d/Exit/Instr/Iret", "IRET instruction."); 984 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt, "/HM/CPU%d/Exit/Instr/Int", "INT instruction."); 985 HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess, "/HM/CPU%d/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access."); 986 986 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite, "/HM/CPU%d/Exit/IO/Write", "I/O write."); 987 987 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead, "/HM/CPU%d/Exit/IO/Read", "I/O read."); … … 989 989 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead, "/HM/CPU%d/Exit/IO/ReadString", "String I/O read."); 990 990 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow, "/HM/CPU%d/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts again."); 991 HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt, "/HM/CPU%d/Exit/ExtInt", " Host interrupt received.");991 HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt, "/HM/CPU%d/Exit/ExtInt", "Physical maskable interrupt (host)."); 992 992 #endif 993 993 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHostNmiInGC, "/HM/CPU%d/Exit/HostNmiInGC", "Host NMI received while in guest context."); … … 995 995 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptTimer, "/HM/CPU%d/Exit/PreemptTimer", "VMX-preemption timer expired."); 996 996 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTprBelowThreshold, "/HM/CPU%d/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest."); 997 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch, "/HM/CPU%d/Exit/TaskSwitch", " Guest attempted a task switch.");997 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch, "/HM/CPU%d/Exit/TaskSwitch", "Task switch."); 998 998 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMtf, "/HM/CPU%d/Exit/MonitorTrapFlag", "Monitor Trap Flag."); 999 999 HM_REG_COUNTER(&pVCpu->hm.s.StatExitApicAccess, "/HM/CPU%d/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page."); … … 1590 1590 1591 1591 /* 1592 * Construct a 1024 element page directory with 4 MB pages for 1593 * the identity mapped page table used in real and protected mode 1594 * without paging with EPT. 
1592 * Construct a 1024 element page directory with 4 MB pages for the identity mapped 1593 * page table used in real and protected mode without paging with EPT. 1595 1594 */ 1596 1595 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3); … … 2104 2103 #ifdef LOG_ENABLED 2105 2104 char szOutput[256]; 2106 2107 2105 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE, 2108 2106 szOutput, sizeof(szOutput), NULL); … … 2473 2471 * pop ECX [59] 2474 2472 * jmp return_address [E9 return_address] 2475 *2476 2473 */ 2477 2474 bool fUsesEax = (pDis->Param2.fUse == DISUSE_REG_GEN32 && pDis->Param2.Base.idxGenReg == DISGREG_EAX); … … 2524 2521 * pop ECX [59] 2525 2522 * jmp return_address [E9 return_address] 2526 *2527 2523 */ 2528 2524 Assert(pDis->Param1.fUse == DISUSE_REG_GEN32); … … 2730 2726 /* 2731 2727 * The following two requirements are VT-x specific: 2732 * - G bit must be set if any high limit bits are set.2733 * - G bit must be clear if any low limit bits are clear.2728 * - G bit must be set if any high limit bits are set. 2729 * - G bit must be clear if any low limit bits are clear. 2734 2730 */ 2735 2731 if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity) … … 2773 2769 AssertMsgReturn( (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE)) 2774 2770 == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P), 2775 ("%#x\n", pSel->Attr.u), 2776 false); 2771 ("%#x\n", pSel->Attr.u), false); 2777 2772 2778 2773 /* DPL must equal RPL. 2779 2774 Note! This is also a hard requirement like above. */ 2780 2775 AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL), 2781 ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), 2782 false); 2776 ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false); 2783 2777 2784 2778 /* 2785 2779 * The following two requirements are VT-x specific: 2786 * - G bit must be set if any high limit bits are set.2787 * - G bit must be clear if any low limit bits are clear.2780 * - G bit must be set if any high limit bits are set. 2781 * - G bit must be clear if any low limit bits are clear. 2788 2782 */ 2789 2783 if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity) … … 2843 2837 2844 2838 /* If we're still executing the IO code, then return false. */ 2845 if ( 2846 && 2847 && 2848 && 2839 if ( RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled) 2840 && pCtx->rip < pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200 2841 && pCtx->rip > pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200 2842 && pCtx->cr0 == pVCpu->hm.s.EmulateIoBlock.cr0) 2849 2843 return false; 2850 2844 … … 2875 2869 if (CPUMIsGuestInRealModeEx(pCtx)) 2876 2870 { 2877 /* In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector 2871 /* 2872 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector 2878 2873 * bases and limits, i.e. limit must be 64K and base must be selector * 16. 2879 2874 * If this is not true, we cannot execute real mode as V86 and have to fall … … 2904 2899 else 2905 2900 { 2906 /* Verify the requirements for executing code in protected 2907 mode. VT-x can't handle the CPU state right after a switch 2908 from real to protected mode. (all sorts of RPL & DPL assumptions). */ 2901 /* 2902 * Verify the requirements for executing code in protected mode. 
VT-x can't 2903 * handle the CPU state right after a switch from real to protected mode 2904 * (all sorts of RPL & DPL assumptions). 2905 */ 2909 2906 if (pVCpu->hm.s.vmx.fWasInRealMode) 2910 2907 { … … 2945 2942 else 2946 2943 { 2947 if ( 2948 && 2944 if ( !CPUMIsGuestInLongModeEx(pCtx) 2945 && !pVM->hm.s.vmx.fUnrestrictedGuest) 2949 2946 { 2950 2947 if ( !pVM->hm.s.fNestedPaging /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */ … … 2956 2953 return false; 2957 2954 2958 /* The guest is about to complete the switch to protected mode. Wait a bit longer. */ 2959 /* Windows XP; switch to protected mode; all selectors are marked not present in the 2960 * hidden registers (possible recompiler bug; see load_seg_vm) */ 2955 /* 2956 * The guest is about to complete the switch to protected mode. Wait a bit longer. 2957 * Windows XP; switch to protected mode; all selectors are marked not present 2958 * in the hidden registers (possible recompiler bug; see load_seg_vm). 2959 */ 2961 2960 /** @todo Is this supposed recompiler bug still relevant with IEM? */ 2962 2961 if (pCtx->cs.Attr.n.u1Present == 0) … … 2965 2964 return false; 2966 2965 2967 /* Windows XP: possible same as above, but new recompiler requires new heuristics? 2968 VT-x doesn't seem to like something about the guest state and this stuff avoids it. */ 2966 /* 2967 * Windows XP: possible same as above, but new recompiler requires new 2968 * heuristics? VT-x doesn't seem to like something about the guest state and 2969 * this stuff avoids it. 2970 */ 2969 2971 /** @todo This check is actually wrong, it doesn't take the direction of the 2970 2972 * stack segment into account. But, it does the job for now. */ -
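The segment checks in the HM.cpp hunks above restate two VT-x-specific limit/granularity rules, and they translate directly into a small predicate. A self-contained sketch with spot checks follows; the bit positions match the architectural segment-limit layout, but the types are simplified for illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* VT-x segment limit consistency:
 *  - if any of limit[31:20] is set, G must be 1 (limit counted in 4K pages);
 *  - if any of limit[11:0] is clear, G must be 0 (a page-granular limit is
 *    always reported with the low 12 bits all-ones). */
static bool isLimitGranularityValid(uint32_t u32Limit, bool fGranularity)
{
    if ((u32Limit & UINT32_C(0xfff00000)) != 0 && !fGranularity)
        return false;
    if ((u32Limit & UINT32_C(0x00000fff)) != UINT32_C(0x00000fff) && fGranularity)
        return false;
    return true;
}

int main(void)
{
    assert( isLimitGranularityValid(0x0000ffff, false));  /* 64K, byte-granular */
    assert( isLimitGranularityValid(0xffffffff, true));   /* 4G, page-granular  */
    assert(!isLimitGranularityValid(0xffffffff, false));  /* high bits need G=1 */
    assert(!isLimitGranularityValid(0x0000f000, true));   /* low bits clear, G must be 0 */
    return 0;
}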
trunk/src/VBox/VMM/include/HMInternal.h
r72744 r72805 921 921 } EmulateIoBlock; 922 922 923 /* */923 /* Pending IO operation. */ 924 924 struct 925 925 { … … 957 957 958 958 STAMPROFILEADV StatEntry; 959 STAMPROFILEADV Stat Exit1;960 STAMPROFILEADV StatExit 2;959 STAMPROFILEADV StatPreExit; 960 STAMPROFILEADV StatExitHandling; 961 961 STAMPROFILEADV StatExitIO; 962 962 STAMPROFILEADV StatExitMovCRx; … … 1114 1114 #ifdef IN_RING0 1115 1115 VMMR0_INT_DECL(PHMGLOBALCPUINFO) hmR0GetCurrentCpu(void); 1116 VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPU pVCpu); 1116 1117 1117 1118 # ifdef VBOX_STRICT 1118 VMMR0_INT_DECL(void) hmR0DumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);1119 VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu, PCPUMCTX pCtx); 1119 1120 VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg); 1120 # else 1121 # define hmR0DumpRegs(a, b ,c) do { } while (0) 1122 # define hmR0DumpDescriptor(a, b, c) do { } while (0) 1123 # endif /* VBOX_STRICT */ 1121 # endif 1124 1122 1125 1123 # ifdef VBOX_WITH_KERNEL_USING_XMM … … 1127 1125 DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun); 1128 1126 # endif 1129 1130 1127 #endif /* IN_RING0 */ 1131 1128
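The HMInternal.h hunk deletes the empty do { } while (0) fallbacks for the dump helpers, which holds together with the HMVMXR0.cpp hunk that moves the remaining hmR0DumpRegs() call site inside the strict-only block. For reference, the idiom being retired looks like this in self-contained form, with MY_STRICT standing in for VBOX_STRICT:

#include <stdio.h>

/* #define MY_STRICT */   /* define for a checked build */

#ifdef MY_STRICT
static void dumpRegs(unsigned idCpu) { printf("cpu%u: <register dump>\n", idCpu); }
#else
/* Compiles away to nothing but still parses as exactly one statement,
 * so an unbraced "if (...) dumpRegs(0); else ..." keeps working. */
# define dumpRegs(a) do { } while (0)
#endif

int main(void)
{
    if (1)
        dumpRegs(0);   /* harmless in either build flavor */
    else
        printf("unreachable\n");
    return 0;
}

Once every caller already lives under the strict #ifdef, the no-op macro flavor is dead weight, which is why the changeset can remove it outright.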