Timestamp: Apr 12, 2018 10:09:25 AM (7 years ago)
File: 1 edited
Legend:
- ' ' Unmodified
- '+' Added
- '-' Removed
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- r71838
+++ r71841
@@ -2383 +2383 @@
  * whether the nested-guest is intercepting it or not.
  *
- * @param   pHostCpu     Pointer to the physical CPU HM info. struct.
- * @param   pVCpu        The cross context virtual CPU structure.
- * @param   pCtx         Pointer to the nested-guest-CPU context.
- */
-static void hmR0SvmMergeMsrpm(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCPUMCTX pCtx)
+ * @param   pHostCpu     Pointer to the physical CPU HM info. struct.
+ * @param   pVCpu        The cross context virtual CPU structure.
+ * @param   pCtx         Pointer to the nested-guest-CPU context.
+ * @param   pVmcbNstGst  Pointer to the nested-guest VMCB.
+ */
+static void hmR0SvmMergeMsrpm(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcbNstGst)
 {
     uint64_t const *pu64GstMsrpm = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap;
@@ -4306 +4307 @@


-#ifdef VBOX_WITH_NESTED_HWVIRT
-/**
- * Prepares to run nested-guest code in AMD-V and we've committed to doing so. This
- * means there is no backing out to ring-3 or anywhere else at this point.
+/**
+ * Prepares to run guest code in AMD-V and we've committed to doing so. This
+ * means there is no backing out to ring-3 or anywhere else at this
+ * point.
  *
  * @param   pVM             The cross context VM structure.
@@ -4319 +4320 @@
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmPreRunGuestCommittedNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);

     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);    /* Indicate the start of guest execution. */

-    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcbNstGst);
-
-    /* Pre-load the guest FPU state. */
+    bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
+    PSVMVMCB pVmcb = !fInNestedGuestMode ? pVCpu->hm.s.svm.pVmcb : pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+
+    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
+
     if (!CPUMIsGuestFPUStateActive(pVCpu))
     {
@@ -4342 +4343 @@
     }

-    /* Load the state shared between host and nested-guest (FPU, debug). */
+    /* Load the state shared between host and guest (FPU, debug). */
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
-        hmR0SvmLoadSharedState(pVCpu, pVmcbNstGst, pCtx);
+        hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);

     HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);        /* Preemption might set this, nothing to do on AMD-V. */
@@ -4357 +4358 @@
         || fMigratedHostCpu)
     {
-        hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcbNstGst);
+        if (!fInNestedGuestMode)
+            hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcb);
+        else
+            hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcb);
         pSvmTransient->fUpdateTscOffsetting = false;
     }
@@ -4363 +4367 @@
     /* If we've migrating CPUs, mark the VMCB Clean bits as dirty. */
     if (fMigratedHostCpu)
-        pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;
+        pVmcb->ctrl.u32VmcbCleanBits = 0;

     /* Store status of the shared guest-host state at the time of VMRUN. */
@@ -4379 +4383 @@
     }

-    /* Merge the guest and nested-guest MSRPM. */
-    hmR0SvmMergeMsrpm(pHostCpu, pVCpu, pCtx);
-
-    /* Update the nested-guest VMCB to use the newly merged MSRPM. */
-    pVmcbNstGst->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
-
-    /* The TLB flushing would've already been setup by the nested-hypervisor. */
+    uint8_t *pbMsrBitmap;
+    if (!fInNestedGuestMode)
+        pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
+    else
+    {
+        hmR0SvmMergeMsrpm(pHostCpu, pVCpu, pCtx, pVmcb);
+
+        /* Update the nested-guest VMCB with the newly merged MSRPM. */
+        pVmcb->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
+        pbMsrBitmap = (uint8_t *)pHostCpu->n.svm.pvNstGstMsrpm;
+    }
+
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);  /* Used for TLB flushing, set this across the world switch. */
-    hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcbNstGst, pHostCpu);
+    /* Flush the appropriate tagged-TLB entries. */
+    hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcb, pHostCpu);
     Assert(pVCpu->hm.s.idLastCpu == idHostCpu);
@@ -4401 +4411 @@
      * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
      */
-    uint8_t *pbMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
-    if (   (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
-        && !(pVmcbNstGst->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
-    {
-        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
-
-        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
-        uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
-        if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
-            ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
-        pSvmTransient->fRestoreTscAuxMsr = true;
-    }
-    else
-    {
-        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
-        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
-        pSvmTransient->fRestoreTscAuxMsr = false;
-    }
-
-    /*
-     * If VMCB Clean bits isn't supported by the CPU or exposed by the guest,
-     * mark all state-bits as dirty indicating to the CPU to re-load from VMCB.
-     */
-    bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx);
-    if (!fSupportsVmcbCleanBits)
-        pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;
-}
-#endif
-
-
-/**
- * Prepares to run guest code in AMD-V and we've committed to doing so. This
- * means there is no backing out to ring-3 or anywhere else at this
- * point.
- *
- * @param   pVM             The cross context VM structure.
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCtx            Pointer to the guest-CPU context.
- * @param   pSvmTransient   Pointer to the SVM transient structure.
- *
- * @remarks Called with preemption disabled.
- * @remarks No-long-jump zone!!!
- */
-static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-    Assert(VMMR0IsLogFlushDisabled(pVCpu));
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
-
-    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);    /* Indicate the start of guest execution. */
-
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
-    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
-
-    if (!CPUMIsGuestFPUStateActive(pVCpu))
-    {
-        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
-        CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
-        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
-        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
-    }
-
-    /* Load the state shared between host and guest (FPU, debug). */
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
-        hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
-
-    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);        /* Preemption might set this, nothing to do on AMD-V. */
-    AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
-
-    PHMGLOBALCPUINFO pHostCpu = hmR0GetCurrentCpu();
-    RTCPUID const idHostCpu = pHostCpu->idCpu;
-    bool const fMigratedHostCpu = idHostCpu != pVCpu->hm.s.idLastCpu;
-
-    /* Setup TSC offsetting. */
-    if (   pSvmTransient->fUpdateTscOffsetting
-        || fMigratedHostCpu)
-    {
-        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcb);
-        pSvmTransient->fUpdateTscOffsetting = false;
-    }
-
-    /* If we've migrating CPUs, mark the VMCB Clean bits as dirty. */
-    if (fMigratedHostCpu)
-        pVmcb->ctrl.u32VmcbCleanBits = 0;
-
-    /* Store status of the shared guest-host state at the time of VMRUN. */
-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
-    if (CPUMIsGuestInLongModeEx(pCtx))
-    {
-        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
-        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
-    }
-    else
-#endif
-    {
-        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
-        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
-    }
-
-    /* Flush the appropriate tagged-TLB entries. */
-    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);  /* Used for TLB flushing, set this across the world switch. */
-    hmR0SvmFlushTaggedTlb(pVCpu, pCtx, pVmcb, pHostCpu);
-    Assert(pVCpu->hm.s.idLastCpu == idHostCpu);
-
-    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
-
-    TMNotifyStartOfExecution(pVCpu);                        /* Finally, notify TM to resume its clocks as we're about
-                                                               to start executing. */
-
-    /*
-     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
-     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
-     *
-     * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
-     */
-    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
     if (   (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
         && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
     {
+        uint64_t const uGuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
+        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
+        if (uGuestTscAux != pVCpu->hm.s.u64HostTscAux)
+            ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
-
-        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
-        uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
-        if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
-            ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
         pSvmTransient->fRestoreTscAuxMsr = true;
     }
@@ -4536 +4424 @@
     {
         hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
-        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
         pSvmTransient->fRestoreTscAuxMsr = false;
     }
-
-    /* If VMCB Clean bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
+    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
+
+    /*
+     * If VMCB Clean bits isn't supported by the CPU or exposed to the guest in the
+     * nested virtualization case, mark all state-bits as dirty indicating to the
+     * CPU to re-load from VMCB.
+     */
     bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx);
     if (!fSupportsVmcbCleanBits)
@@ -4977 +4869 @@
      * This also disables flushing of the R0-logger instance (if any).
      */
-    hmR0SvmPreRunGuestCommittedNested(pVM, pVCpu, pCtx, &SvmTransient);
+    hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);

     rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);