Changeset 72805 in vbox for trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
- Timestamp: Jul 3, 2018 4:05:43 AM (6 years ago)
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r72786)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r72805)
@@ -73 +73 @@
 # endif
 #endif /* !VBOX_WITH_STATISTICS */
-
 
 /** If we decide to use a function table approach this can be useful to
@@ -528 +527 @@
  *
  * @returns VBox status code.
- * @param   pCpu            Pointer to the CPU info struct.
+ * @param   pHostCpu        Pointer to the CPU info struct.
  * @param   pVM             The cross context VM structure. Can be
  *                          NULL after a resume!
@@ -536 +535 @@
  * @param   pvArg           Unused on AMD-V.
  */
-VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
                               void *pvArg)
 {
@@ -560 +559 @@
     if (   pVM
         && pVM->hm.s.svm.fIgnoreInUseError)
-        pCpu->fIgnoreAMDVInUseError = true;
-
-    if (!pCpu->fIgnoreAMDVInUseError)
+        pHostCpu->fIgnoreAMDVInUseError = true;
+
+    if (!pHostCpu->fIgnoreAMDVInUseError)
     {
         ASMSetFlags(fEFlags);
@@ -584 +583 @@
      * entirely with before executing any guest code.
      */
-    pCpu->fFlushAsidBeforeUse = true;
+    pHostCpu->fFlushAsidBeforeUse = true;
 
     /*
      * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
      */
-    ++pCpu->cTlbFlushes;
+    ++pHostCpu->cTlbFlushes;
 
     return VINF_SUCCESS;
@@ -599 +598 @@
  *
  * @returns VBox status code.
- * @param   pCpu            Pointer to the CPU info struct.
+ * @param   pHostCpu        Pointer to the CPU info struct.
  * @param   pvCpuPage       Pointer to the global CPU page.
  * @param   HCPhysCpuPage   Physical address of the global CPU page.
  */
-VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
@@ -609 +608 @@
                  && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
-    NOREF(pCpu);
+    RT_NOREF(pHostCpu);
 
     /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
@@ -1169 +1168 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   GCVirt      Guest virtual address of the page to invalidate.
  */
-VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
-{
-    AssertReturn(pVM, VERR_INVALID_PARAMETER);
-    Assert(pVM->hm.s.svm.fSupported);
-
-    bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
+VMMR0DECL(int) SVMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
+{
+    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
+
+    bool const fFlushPending = pVCpu->CTX_SUFF(pVM)->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
 
     /* Skip it if a TLB flush is already pending. */
@@ -1610 +1607 @@
         pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 
-    Assert(RT_HI_U32(uShadowCr0) == 0);
+    Assert(!RT_HI_U32(uShadowCr0));
     if (pVmcb->guest.u64CR0 != uShadowCr0)
     {
@@ -1723 +1720 @@
 
     /* Update VMCB with the shadow CR4 the appropriate VMCB clean bits. */
-    Assert(RT_HI_U32(uShadowCr4) == 0);
+    Assert(!RT_HI_U32(uShadowCr4));
     pVmcb->guest.u64CR4 = uShadowCr4;
     pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
@@ -2337 +2334 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCpu        Pointer to the CPU info struct.
- */
-VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
-{
-    AssertPtr(pVM);
+ * @param   pHostCpu    Pointer to the CPU info struct.
+ */
+VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
+{
     AssertPtr(pVCpu);
-    Assert(pVM->hm.s.svm.fSupported);
+    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    NOREF(pVM); NOREF(pCpu);
-
-    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
+    RT_NOREF(pHostCpu);
+
+    LogFlowFunc(("pVCpu=%p\n", pVCpu));
     Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
                 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
@@ -2410 +2405 @@
      * initializing AMD-V if necessary (onlined CPUs, local init etc.)
      */
-    int rc = HMR0EnterCpu(pVCpu);
+    int rc = hmR0EnterCpu(pVCpu);
     AssertRC(rc); NOREF(rc);
     Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
@@ -3126 +3121 @@
     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
-    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
-    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
 
@@ -3255 +3250 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
@@ -3261 +3255 @@
  *          VINF_VMM_UNKNOWN_RING3_CALL.
  */
-static int hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
-{
-    Assert(pVM);
+static int hmR0SvmExitToRing3(PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
+{
     Assert(pVCpu);
     Assert(pCtx);
@@ -3270 +3263 @@
     /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
     VMMRZCallRing3Disable(pVCpu);
-    Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions, pVM->fGlobalForcedActions));
+    Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions,
+              pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions));
 
     /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
@@ -3290 +3284 @@
                              | CPUM_CHANGED_TR
                              | CPUM_CHANGED_HIDDEN_SEL_REGS);
-    if (   pVM->hm.s.fNestedPaging
+    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
         && CPUMIsGuestPagingEnabledEx(pCtx))
     {
@@ -3331 +3325 @@
  *          intercepts.
  *
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU or nested-guest-CPU context.
  * @param   pVmcb       Pointer to the VM control block.
@@ -3338 +3330 @@
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
+static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
 {
     /*
@@ -3347 +3339 @@
     bool       fParavirtTsc;
     uint64_t   uTscOffset;
-    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
+    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu->CTX_SUFF(pVM), pVCpu, &uTscOffset, &fParavirtTsc);
 
     bool fIntercept;
@@ -4079 +4071 @@
  * Reports world-switch error and dumps some useful debug info.
  *
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   rcVMRun     The return code from VMRUN (or
@@ -4086 +4077 @@
  * @param   pCtx        Pointer to the guest-CPU context.
  */
-static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
-{
-    NOREF(pCtx);
+static void hmR0SvmReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
+{
     HMSVM_ASSERT_PREEMPT_SAFE();
     HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
 
-    PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
     {
-        hmR0DumpRegs(pVM, pVCpu, pCtx); NOREF(pVM);
-        /** @todo We probably don't need to dump this anymore or we can expand
-         *        hmR0DumpRegs()? */
 #ifdef VBOX_STRICT
+        hmR0DumpRegs(pVCpu, pCtx);
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         Log4(("ctrl.u32VmcbCleanBits             %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
         Log4(("ctrl.u16InterceptRdCRx            %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
@@ -4225 +4213 @@
     Log4Func(("rcVMRun=%d\n", rcVMRun));
 
-    NOREF(pVmcb);
+    NOREF(pCtx);
 }
@@ -4245 +4233 @@
  *          to the EM loop.
  *
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
  */
-static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static int hmR0SvmCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
@@ -4262 +4249 @@
     APICUpdatePendingInterrupts(pVCpu);
 
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (   VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
                             ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
@@ -4324 +4312 @@
  * @retval  VINF_* scheduling changes, we have to go back to ring-3.
  *
- * @param   pVM             The cross context VM structure.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pCtx            Pointer to the nested-guest-CPU context.
@@ -4332 +4319 @@
  * @sa      hmR0SvmPreRunGuest.
  */
-static int hmR0SvmPreRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static int hmR0SvmPreRunGuestNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     HMSVM_ASSERT_PREEMPT_SAFE();
@@ -4343 +4330 @@
 
     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
+    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
     if (rc != VINF_SUCCESS)
         return rc;
@@ -4364 +4351 @@
      * NB: If we could continue a task switch exit we wouldn't need to do this.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (RT_UNLIKELY(   !pVM->hm.s.svm.u32Features
                     && pVCpu->hm.s.Event.fPending
@@ -4446 +4434 @@
  * @retval  VINF_* scheduling changes, we have to go back to ring-3.
  *
- * @param   pVM             The cross context VM structure.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pCtx            Pointer to the guest-CPU context.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
  */
-static int hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static int hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     HMSVM_ASSERT_PREEMPT_SAFE();
@@ -4457 +4444 @@
     /* Check force flag actions that might require us to go back to ring-3. */
-    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
+    int rc = hmR0SvmCheckForceFlags(pVCpu, pCtx);
     if (rc != VINF_SUCCESS)
         return rc;
@@ -4471 +4458 @@
      * NB: If we could continue a task switch exit we wouldn't need to do this.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
         if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))
@@ -4604 +4592 @@
         || fMigratedHostCpu)
     {
-        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pCtx, pVmcb);
+        hmR0SvmUpdateTscOffsetting(pVCpu, pCtx, pVmcb);
         pSvmTransient->fUpdateTscOffsetting = false;
     }
@@ -4687 +4675 @@
 
 /**
- * Wrapper for running the guest code in AMD-V.
+ * Wrapper for running the guest (or nested-guest) code in AMD-V.
  *
  * @returns VBox strict status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   HCPhysVmcb  The host physical address of the VMCB.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0SvmRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, RTHCPHYS HCPhysVmcb)
 {
     /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
@@ -4708 +4696 @@
      * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 #ifdef VBOX_WITH_KERNEL_USING_XMM
-    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
-                             pVCpu->hm.s.svm.pfnVMRun);
+    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu, pVCpu->hm.s.svm.pfnVMRun);
 #else
-    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
-#endif
-}
-
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-/**
- * Wrapper for running the nested-guest code in AMD-V.
- *
- * @returns VBox strict status code.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-DECLINLINE(int) hmR0SvmRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
-    pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
-
-    /*
-     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
-     * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
-     * callee-saved and thus the need for this XMM wrapper.
-     *
-     * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
-     */
-#ifdef VBOX_WITH_KERNEL_USING_XMM
-    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
-                             pVCpu->hm.s.svm.pfnVMRun);
-#else
-    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
+    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu);
 #endif
 }
@@ -4768 +4724 @@
     return uTicks - pVmcbNstGstCache->u64TSCOffset;
 }
-#endif
+
 
 /**
@@ -4819 +4775 @@
     }
 
-    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
+    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
     TMNotifyEndOfExecution(pVCpu);        /* Notify TM that the guest is no longer running. */
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
@@ -4908 +4864 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
  * @param   pcLoops     Pointer to the number of executed loops.
  */
-static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
-{
-    uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
+static int hmR0SvmRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+{
+    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
     Assert(pcLoops);
     Assert(*pcLoops <= cMaxResumeLoops);
@@ -4933 +4888 @@
            ring-3. This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;
@@ -4944 +4899 @@
          */
         hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
-        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
+        rc = hmR0SvmRunGuest(pVCpu, pCtx, pVCpu->hm.s.svm.HCPhysVmcb);
 
         /* Restore any residual host-state and save any bits shared between host and guest
@@ -4955 +4910 @@
             if (rc == VINF_SUCCESS)
                 rc = VERR_SVM_INVALID_GUEST_STATE;
-            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
-            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
+            hmR0SvmReportWorldSwitchError(pVCpu, rc, pCtx);
             break;
         }
@@ -4962 +4917 @@
         /* Handle the #VMEXIT. */
         HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
-        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
         rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (rc != VINF_SUCCESS)
             break;
@@ -4985 +4940 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
  * @param   pcLoops     Pointer to the number of executed loops.
  */
-static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
-{
-    uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
+static int hmR0SvmRunGuestCodeStep(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+{
+    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
     Assert(pcLoops);
     Assert(*pcLoops <= cMaxResumeLoops);
@@ -5015 +4969 @@
            ring-3. This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;
@@ -5029 +4983 @@
         hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
 
-        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
+        rc = hmR0SvmRunGuest(pVCpu, pCtx, pVCpu->hm.s.svm.HCPhysVmcb);
 
         /* Restore any residual host-state and save any bits shared between host and guest
@@ -5040 +4994 @@
             if (rc == VINF_SUCCESS)
                 rc = VERR_SVM_INVALID_GUEST_STATE;
-            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
-            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
+            hmR0SvmReportWorldSwitchError(pVCpu, rc, pCtx);
             return rc;
         }
@@ -5047 +5001 @@
         /* Handle the #VMEXIT. */
         HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
-        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
         rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (rc != VINF_SUCCESS)
             break;
@@ -5091 +5045 @@
  *
  * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
@@ -5098 +5051 @@
  *          execution loop pass the remainder value, else pass 0.
  */
-static int hmR0SvmRunGuestCodeNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
+static int hmR0SvmRunGuestCodeNested(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
 {
     HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
     Assert(pcLoops);
-    Assert(*pcLoops <= pVM->hm.s.cMaxResumeLoops);
+    Assert(*pcLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops);
 
     SVMTRANSIENT SvmTransient;
@@ -5119 +5072 @@
            ring-3. This bugger disables interrupts on VINF_SUCCESS! */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0SvmPreRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuestNested(pVCpu, pCtx, &SvmTransient);
         if (   rc != VINF_SUCCESS
             || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
@@ -5134 +5087 @@
         hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
 
-        rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);
+        rc = hmR0SvmRunGuest(pVCpu, pCtx, pCtx->hwvirt.svm.HCPhysVmcb);
 
         /* Restore any residual host-state and save any bits shared between host and guest
@@ -5158 +5111 @@
         /* Handle the #VMEXIT. */
         HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
-        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb));
         rc = hmR0SvmHandleExitNested(pVCpu, pCtx, &SvmTransient);
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
         if (   rc != VINF_SUCCESS
             || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
             break;
-        if (++(*pcLoops) >= pVM->hm.s.cMaxResumeLoops)
+        if (++(*pcLoops) >= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
         {
             STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
@@ -5185 +5138 @@
  *
  * @returns Strict VBox status code.
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
  */
-VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
@@ -5202 +5154 @@
     {
         if (!pVCpu->hm.s.fSingleInstruction)
-            rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx, &cLoops);
+            rc = hmR0SvmRunGuestCodeNormal(pVCpu, pCtx, &cLoops);
         else
-            rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx, &cLoops);
+            rc = hmR0SvmRunGuestCodeStep(pVCpu, pCtx, &cLoops);
     }
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
@@ -5216 +5168 @@
         if (rc == VINF_SVM_VMRUN)
         {
-            rc = hmR0SvmRunGuestCodeNested(pVM, pVCpu, pCtx, &cLoops);
+            rc = hmR0SvmRunGuestCodeNested(pVCpu, pCtx, &cLoops);
             if (rc == VINF_SVM_VMEXIT)
                 rc = VINF_SUCCESS;
@@ -5229 +5181 @@
 
     /* Prepare to return to ring-3. This will remove longjmp notifications. */
-    rc = hmR0SvmExitToRing3(pVM, pVCpu, pCtx, rc);
+    rc = hmR0SvmExitToRing3(pVCpu, pCtx, rc);
     Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
     return rc;
@@ -5954 +5906 @@
  * @retval  VERR_* Fatal errors.
  *
- * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        The guest CPU context.
@@ -5960 +5911 @@
  * @remarks Updates the RIP if the instruction was executed successfully.
  */
-static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static int hmR0SvmInterpretInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     /* Only allow 32 & 64 bit code. */
@@ -5966 +5917 @@
     {
         PDISSTATE pDis = &pVCpu->hm.s.DisState;
-        int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
+        int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, NULL /* pcbInstr */);
         if (   RT_SUCCESS(rc)
             && pDis->pCurInstr->uOpcode == OP_INVLPG)
@@ -6447 +6398 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    Assert(!pVM->hm.s.fNestedPaging);
+    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
 
     bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu, pCtx);
@@ -6465 +6415 @@
 
     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
-    int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, pCtx);    /* Updates RIP if successful. */
+    int rc = hmR0SvmInterpretInvlpg(pVCpu, pCtx);    /* Updates RIP if successful. */
     Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
     HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
@@ -6997 +6947 @@
     static uint32_t const s_aIOSize[8]  = { 0, 1, 2, 0, 4, 0, 0, 0 };                  /* Size of the I/O accesses in bytes. */
     static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
                                                                                            the result (in AL/AX/EAX). */
     Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
 
@@ -7243 +7193 @@
      */
     if (   pVM->hm.s.fTprPatchingAllowed
-        && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
         && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
         && (   !(u32ErrCode & X86_TRAP_PF_P)                                                             /* Not present */
             || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD))  /* MMIO page. */
+        && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
        && !CPUMIsGuestInLongModeEx(pCtx)
        && !CPUMGetGuestCPL(pVCpu)
@@ -8049 +7999 @@
     return VINF_SUCCESS;
 }
-
 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
 
-
 /** @} */
 
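Nearly every hunk above applies the same two refactorings: functions stop taking a separate PVM argument and instead derive the VM pointer from the VCPU (pVCpu->CTX_SUFF(pVM)) only where it is actually needed, and the single hmR0SvmRunGuest() wrapper now takes the VMCB physical address as a parameter so the guest and nested-guest paths share one run path. A minimal, self-contained sketch of that pattern follows; the types and function names here are illustrative placeholders, not the real VirtualBox definitions:

    /* Illustrative placeholders only -- the real PVM/PVMCPU/VMCB layouts live in the VBox headers. */
    typedef unsigned long long HOSTPHYS;                        /* host-physical address of a VMCB */
    typedef struct VMSKETCH    { int fNestedPaging; }  VMSKETCH,    *PVMSKETCH;
    typedef struct VMCPUSKETCH { PVMSKETCH pVMR0; }    VMCPUSKETCH, *PVMCPUSKETCH;

    /* Old shape: callers pass both pointers and must keep them consistent:
     *     static int runGuestOld(PVMSKETCH pVM, PVMCPUSKETCH pVCpu, ...);
     * New shape: the VCPU is the single handle; the VM pointer is derived on demand and the
     * VMCB address is a parameter, so one wrapper serves guest and nested-guest VMCBs. */
    static int runGuestSketch(PVMCPUSKETCH pVCpu, HOSTPHYS HCPhysVmcb)
    {
        PVMSKETCH pVM = pVCpu->pVMR0;   /* stands in for pVCpu->CTX_SUFF(pVM) */
        (void)pVM;
        (void)HCPhysVmcb;               /* ... set up state and issue VMRUN on HCPhysVmcb ... */
        return 0;
    }

In the changeset itself this is what the callers do: hmR0SvmRunGuestCodeNormal() and hmR0SvmRunGuestCodeStep() pass pVCpu->hm.s.svm.HCPhysVmcb, while hmR0SvmRunGuestCodeNested() passes pCtx->hwvirt.svm.HCPhysVmcb, which is why the separate hmR0SvmRunGuestNested() wrapper could be deleted.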