Changeset 97062 in vbox
- Timestamp: Oct 9, 2022, 10:30:29 PM (2 years ago)
- Location: trunk/src/VBox/VMM
- Files: 2 edited
Legend:
- Unmodified (shown as plain context lines)
- Added (lines prefixed with "+")
- Removed (lines prefixed with "-")
trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
(r97061 → r97062)

Lines 3451-3458; lines 3453 and 3456 are modified (their pre-change text is cut off after "#" in this view):

      * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
      */
-#
+#ifdef RT_OS_WINDOWS
     if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
         return VERR_HM_IPE_1;
-#
+#endif
 
     STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r97060 → r97062)

Old lines 4436-4833 / new lines 4436-4439 (old lines 4438-4831 removed):

 
 /**
- * Worker for VMXR0ImportStateOnDemand.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pVmcsInfo   The VMCS info. object.
- * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
- */
-static int hmR0VmxImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
-{
-    int      rc   = VINF_SUCCESS;
-    PVMCC    pVM  = pVCpu->CTX_SUFF(pVM);
-    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    uint32_t u32Val;
-
-    /*
-     * Note! This is hack to workaround a mysterious BSOD observed with release builds
-     *       on Windows 10 64-bit hosts. Profile and debug builds are not affected and
-     *       neither are other host platforms.
-     *
-     *       Committing this temporarily as it prevents BSOD.
-     *
-     *       Update: This is very likely a compiler optimization bug, see @bugref{9180}.
-     */
-#ifdef RT_OS_WINDOWS
-    if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
-        return VERR_HM_IPE_1;
-#endif
-
-    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
-
-    /*
-     * We disable interrupts to make the updating of the state and in particular
-     * the fExtrn modification atomic wrt to preemption hooks.
-     */
-    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
-
-    fWhat &= pCtx->fExtrn;
-    if (fWhat)
-    {
-        do
-        {
-            if (fWhat & CPUMCTX_EXTRN_RIP)
-                vmxHCImportGuestRip(pVCpu);
-
-            if (fWhat & CPUMCTX_EXTRN_RFLAGS)
-                vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
-
-            if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
-                vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
-
-            if (fWhat & CPUMCTX_EXTRN_RSP)
-            {
-                rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RSP, &pCtx->rsp);
-                AssertRC(rc);
-            }
-
-            if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
-            {
-                PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
-                bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
-                if (fWhat & CPUMCTX_EXTRN_CS)
-                {
-                    vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
-                    vmxHCImportGuestRip(pVCpu);
-                    if (fRealOnV86Active)
-                        pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
-                    EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
-                }
-                if (fWhat & CPUMCTX_EXTRN_SS)
-                {
-                    vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
-                    if (fRealOnV86Active)
-                        pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
-                }
-                if (fWhat & CPUMCTX_EXTRN_DS)
-                {
-                    vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
-                    if (fRealOnV86Active)
-                        pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
-                }
-                if (fWhat & CPUMCTX_EXTRN_ES)
-                {
-                    vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
-                    if (fRealOnV86Active)
-                        pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
-                }
-                if (fWhat & CPUMCTX_EXTRN_FS)
-                {
-                    vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
-                    if (fRealOnV86Active)
-                        pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
-                }
-                if (fWhat & CPUMCTX_EXTRN_GS)
-                {
-                    vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
-                    if (fRealOnV86Active)
-                        pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
-                }
-            }
-
-            if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
-            {
-                if (fWhat & CPUMCTX_EXTRN_LDTR)
-                    vmxHCImportGuestLdtr(pVCpu);
-
-                if (fWhat & CPUMCTX_EXTRN_GDTR)
-                {
-                    rc = VMXReadVmcsNw(VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt);    AssertRC(rc);
-                    rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);          AssertRC(rc);
-                    pCtx->gdtr.cbGdt = u32Val;
-                }
-
-                /* Guest IDTR. */
-                if (fWhat & CPUMCTX_EXTRN_IDTR)
-                {
-                    rc = VMXReadVmcsNw(VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt);    AssertRC(rc);
-                    rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);          AssertRC(rc);
-                    pCtx->idtr.cbIdt = u32Val;
-                }
-
-                /* Guest TR. */
-                if (fWhat & CPUMCTX_EXTRN_TR)
-                {
-                    /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
-                       don't need to import that one. */
-                    if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
-                        vmxHCImportGuestTr(pVCpu);
-                }
-            }
-
-            if (fWhat & CPUMCTX_EXTRN_DR7)
-            {
-                if (!pVCpu->hmr0.s.fUsingHyperDR7)
-                {
-                    rc = VMXReadVmcsNw(VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
-                    AssertRC(rc);
-                }
-            }
-
-            if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
-            {
-                rc = VMXReadVmcsNw(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);  AssertRC(rc);
-                rc = VMXReadVmcsNw(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);  AssertRC(rc);
-                rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);             AssertRC(rc);
-                pCtx->SysEnter.cs = u32Val;
-            }
-
-            if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
-            {
-                if (   pVM->hmr0.s.fAllow64BitGuests
-                    && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
-                    pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
-            }
-
-            if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
-            {
-                if (   pVM->hmr0.s.fAllow64BitGuests
-                    && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
-                {
-                    pCtx->msrLSTAR  = ASMRdMsr(MSR_K8_LSTAR);
-                    pCtx->msrSTAR   = ASMRdMsr(MSR_K6_STAR);
-                    pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
-                }
-            }
-
-            if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
-            {
-                PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
-                PCVMXAUTOMSR       pMsrs           = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
-                uint32_t const     cMsrs           = pVmcsInfo->cExitMsrStore;
-                Assert(pMsrs);
-                Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
-                Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
-                for (uint32_t i = 0; i < cMsrs; i++)
-                {
-                    uint32_t const idMsr = pMsrs[i].u32Msr;
-                    switch (idMsr)
-                    {
-                        case MSR_K8_TSC_AUX:        CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value);     break;
-                        case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value);   break;
-                        case MSR_K6_EFER:           /* Can't be changed without causing a VM-exit */  break;
-                        default:
-                        {
-                            uint32_t idxLbrMsr;
-                            if (pVM->hmr0.s.vmx.fLbr)
-                            {
-                                if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
-                                {
-                                    Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
-                                    pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
-                                    break;
-                                }
-                                if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
-                                {
-                                    Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
-                                    pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
-                                    break;
-                                }
-                                if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
-                                {
-                                    pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
-                                    break;
-                                }
-                                /* Fallthru (no break) */
-                            }
-                            pCtx->fExtrn = 0;
-                            pVCpu->hm.s.u32HMError = pMsrs->u32Msr;
-                            ASMSetFlags(fEFlags);
-                            AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
-                            return VERR_HM_UNEXPECTED_LD_ST_MSR;
-                        }
-                    }
-                }
-            }
-
-            if (fWhat & CPUMCTX_EXTRN_CR_MASK)
-            {
-                if (fWhat & CPUMCTX_EXTRN_CR0)
-                {
-                    uint64_t u64Cr0;
-                    uint64_t u64Shadow;
-                    rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR0, &u64Cr0);                   AssertRC(rc);
-                    rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow);     AssertRC(rc);
-#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
-                    u64Cr0 = (u64Cr0    & ~pVmcsInfo->u64Cr0Mask)
-                           | (u64Shadow &  pVmcsInfo->u64Cr0Mask);
-#else
-                    if (!CPUMIsGuestInVmxNonRootMode(pCtx))
-                    {
-                        u64Cr0 = (u64Cr0    & ~pVmcsInfo->u64Cr0Mask)
-                               | (u64Shadow &  pVmcsInfo->u64Cr0Mask);
-                    }
-                    else
-                    {
-                        /*
-                         * We've merged the guest and nested-guest's CR0 guest/host mask while executing
-                         * the nested-guest using hardware-assisted VMX. Accordingly we need to
-                         * re-construct CR0. See @bugref{9180#c95} for details.
-                         */
-                        PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
-                        PVMXVVMCS const     pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
-                        u64Cr0 = (u64Cr0                     & ~pVmcsInfo->u64Cr0Mask)
-                               | (pVmcsNstGst->u64GuestCr0.u &  pVmcsNstGst->u64Cr0Mask.u)
-                               | (u64Shadow                  & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
-                    }
-#endif
-                    VMMRZCallRing3Disable(pVCpu);   /* May call into PGM which has Log statements. */
-                    CPUMSetGuestCR0(pVCpu, u64Cr0);
-                    VMMRZCallRing3Enable(pVCpu);
-                }
-
-                if (fWhat & CPUMCTX_EXTRN_CR4)
-                {
-                    uint64_t u64Cr4;
-                    uint64_t u64Shadow;
-                    rc  = VMXReadVmcsNw(VMX_VMCS_GUEST_CR4, &u64Cr4);                  AssertRC(rc);
-                    rc |= VMXReadVmcsNw(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow);    AssertRC(rc);
-#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
-                    u64Cr4 = (u64Cr4    & ~pVmcsInfo->u64Cr4Mask)
-                           | (u64Shadow &  pVmcsInfo->u64Cr4Mask);
-#else
-                    if (!CPUMIsGuestInVmxNonRootMode(pCtx))
-                    {
-                        u64Cr4 = (u64Cr4    & ~pVmcsInfo->u64Cr4Mask)
-                               | (u64Shadow &  pVmcsInfo->u64Cr4Mask);
-                    }
-                    else
-                    {
-                        /*
-                         * We've merged the guest and nested-guest's CR4 guest/host mask while executing
-                         * the nested-guest using hardware-assisted VMX. Accordingly we need to
-                         * re-construct CR4. See @bugref{9180#c95} for details.
-                         */
-                        PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
-                        PVMXVVMCS const     pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
-                        u64Cr4 = (u64Cr4                     & ~pVmcsInfo->u64Cr4Mask)
-                               | (pVmcsNstGst->u64GuestCr4.u &  pVmcsNstGst->u64Cr4Mask.u)
-                               | (u64Shadow                  & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
-                    }
-#endif
-                    pCtx->cr4 = u64Cr4;
-                }
-
-                if (fWhat & CPUMCTX_EXTRN_CR3)
-                {
-                    /* CR0.PG bit changes are always intercepted, so it's up to date. */
-                    if (   pVM->hmr0.s.vmx.fUnrestrictedGuest
-                        || (   pVM->hmr0.s.fNestedPaging
-                            && CPUMIsGuestPagingEnabledEx(pCtx)))
-                    {
-                        uint64_t u64Cr3;
-                        rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR3, &u64Cr3);   AssertRC(rc);
-                        if (pCtx->cr3 != u64Cr3)
-                        {
-                            pCtx->cr3 = u64Cr3;
-                            VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
-                        }
-
-                        /*
-                         * If the guest is in PAE mode, sync back the PDPE's into the guest state.
-                         * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
-                         */
-                        if (CPUMIsGuestInPAEModeEx(pCtx))
-                        {
-                            X86PDPE aPaePdpes[4];
-                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u);   AssertRC(rc);
-                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u);   AssertRC(rc);
-                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u);   AssertRC(rc);
-                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u);   AssertRC(rc);
-                            if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
-                            {
-                                memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
-                                /* PGM now updates PAE PDPTEs while updating CR3. */
-                                VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
-                            }
-                        }
-                    }
-                }
-            }
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-            if (fWhat & CPUMCTX_EXTRN_HWVIRT)
-            {
-                if (   (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
-                    && !CPUMIsGuestInVmxNonRootMode(pCtx))
-                {
-                    Assert(CPUMIsGuestInVmxRootMode(pCtx));
-                    rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
-                    if (RT_SUCCESS(rc))
-                    { /* likely */ }
-                    else
-                        break;
-                }
-            }
-#endif
-        } while (0);
-
-        if (RT_SUCCESS(rc))
-        {
-            /* Update fExtrn. */
-            pCtx->fExtrn &= ~fWhat;
-
-            /* If everything has been imported, clear the HM keeper bit. */
-            if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
-            {
-                pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
-                Assert(!pCtx->fExtrn);
-            }
-        }
-    }
-    else
-        AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
-
-    /*
-     * Restore interrupts.
-     */
-    ASMSetFlags(fEFlags);
-
-    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
-
-    if (RT_SUCCESS(rc))
-    { /* likely */ }
-    else
-        return rc;
-
-    /*
-     * Honor any pending CR3 updates.
-     *
-     * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
-     * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
-     * -> continue with VM-exit handling -> hmR0VmxImportGuestState() and here we are.
-     *
-     * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
-     * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
-     * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
-     * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
-     *
-     * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
-     *
-     * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
-     */
-    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
-        && VMMRZCallRing3IsEnabled(pVCpu))
-    {
-        Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
-        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
-        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
-    }
-
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Saves the guest state from the VMCS into the guest-CPU context.
  *
…

Old lines 4840-4844 / new lines 4446-4450:

     AssertPtr(pVCpu);
     PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
-    return hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fWhat);
+    return vmxHCImportGuestState(pVCpu, pVmcsInfo, fWhat);
 }
 
…

Old lines 4992-4996 / new lines 4598-4602:

     if (fImportState)
     {
-        int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
+        int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
         AssertRCReturn(rc, rc);
     }
…

Old lines 5023-5027 / new lines 4629-4633:

     if (!fImportState)
     {
-        int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
+        int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
         AssertRCReturn(rc, rc);
     }
…

Old lines 7129-7133 / new lines 6735-6739:

         if (fStepping)
         {
-            int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
+            int rc = vmxHCImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
             AssertRC(rc);
             if (   pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
…

Old lines 7156-7160 / new lines 6762-6766:

     if (pVCpu->hmr0.s.fClearTrapFlag)
     {
-        int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
+        int rc = vmxHCImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
         AssertRC(rc);
         pVCpu->hmr0.s.fClearTrapFlag = false;
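
For readers unfamiliar with the on-demand import pattern used at the call sites above, here is a minimal, hypothetical sketch (simplified, not part of this changeset; the helper name is invented, the other identifiers are taken from the diff): the caller requests only the guest-state bits it needs via a CPUMCTX_EXTRN_XXX mask and lets the shared worker pull them from the VMCS into pVCpu->cpum.GstCtx.

    /* Hypothetical example: import CS and RIP from the active VMCS on demand. */
    static int exampleImportCsAndRip(PVMCPUCC pVCpu)
    {
        PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);   /* currently active VMCS info */
        int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
        AssertRCReturn(rc, rc);
        /* pVCpu->cpum.GstCtx.cs and pVCpu->cpum.GstCtx.rip are now valid for the caller. */
        return VINF_SUCCESS;
    }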