Changeset 92566 in vbox
- Timestamp: Nov 23, 2021 3:29:41 PM
- svn:sync-xref-src-repo-rev: 148448
- File: 1 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
--- trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r92546)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r92566)
@@ -676,4 +676,18 @@
 
 /**
+ * Determines whether the guest is using PAE paging given the VMCS.
+ *
+ * @returns @c true if PAE paging mode is used, @c false otherwise.
+ * @param   pVmcs   Pointer to the virtual VMCS.
+ */
+DECL_FORCE_INLINE(bool) iemVmxVmcsIsGuestPaePagingEnabled(PCVMXVVMCS pVmcs)
+{
+    return (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
+            &&  (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
+            &&  (pVmcs->u64GuestCr0.u & X86_CR0_PG));
+}
+
+
+/**
  * Sets the Exit qualification VMCS field.
  *
@@ -2583,4 +2597,13 @@
     VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_VMX_ALL_MASK);
 
+    /*
+     * We're no longer in nested-guest execution mode.
+     *
+     * It is important to do this prior to loading the host state because
+     * PGM looks at fInVmxNonRootMode to determine if it needs to perform
+     * second-level address translation while switching to host CR3.
+     */
+    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
+
     /* Restore the host (outer guest) state. */
     VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
@@ -2592,7 +2615,4 @@
     else
         Log3(("vmexit: Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
-
-    /* We're no longer in nested-guest execution mode. */
-    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
 
     /* Notify HM that the current VMCS fields have been modified. */
@@ -5603,9 +5623,7 @@
  *
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pfPdpesMapped   Where to store whether PAE PDPTEs (and PDPT) have been
- *                          mapped as part of checking guest state.
  * @param   pszInstr        The VMX instruction name (for logging purposes).
  */
-IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, bool *pfPdpesMapped, const char *pszInstr)
+IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, const char *pszInstr)
 {
     /*
@@ -5615,43 +5633,31 @@
     PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     const char * const pszFailure = "VM-exit";
-    *pfPdpesMapped = false;
-
-    if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
-        &&  (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
-        &&  (pVmcs->u64GuestCr0.u & X86_CR0_PG))
-    {
+
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-        if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
-        {
-            /* Get PDPTEs from the VMCS. */
-            X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
-            aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
-            aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
-            aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
-            aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
-
-            /* Check validity of the PDPTEs. */
-            bool const fValid = PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]);
-            if (fValid)
-            { /* likely */ }
-            else
-            {
-                iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
-                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
-            }
-        }
-        else
+    /*
+     * When EPT is used, we only validate the PAE PDPTEs provided in the VMCS.
+     * Otherwise, we load any PAE PDPTEs referenced by CR3 at a later point.
+     */
+    if (   iemVmxVmcsIsGuestPaePagingEnabled(pVmcs)
+        && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT))
+    {
+        /* Get PDPTEs from the VMCS. */
+        X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
+        aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
+        aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
+        aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
+        aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
+
+        /* Check validity of the PDPTEs. */
+        bool const fValid = PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]);
+        if (fValid)
+        { /* likely */ }
+        else
+        {
+            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
+            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
+        }
+    }
#endif
-        {
-            int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
-            if (RT_SUCCESS(rc))
-                *pfPdpesMapped = true;
-            else
-            {
-                iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
-                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
-            }
-        }
-    }
 
     NOREF(pszInstr);
@@ -5670,6 +5676,6 @@
  *
  * @param   pszInstr    The VMX instruction name (for logging purposes).
  */
-IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, bool *pfPdpesMapped, const char *pszInstr)
+IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, const char *pszInstr)
 {
     int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
@@ -5686,5 +5692,5 @@
             rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
             if (RT_SUCCESS(rc))
-                return iemVmxVmentryCheckGuestPdptes(pVCpu, pfPdpesMapped, pszInstr);
+                return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
         }
     }
@@ -6629,9 +6635,10 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pszInstr    The VMX instruction name (for logging purposes).
  *
  * @remarks This must be called only after loading the nested-guest register state
  *          (especially nested-guest RIP).
  */
-IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPUCC pVCpu)
+IEM_STATIC int iemVmxVmentryLoadGuestNonRegState(PVMCPUCC pVCpu, const char *pszInstr)
 {
     /*
@@ -6639,4 +6646,5 @@
      * See Intel spec. 26.6 "Special Features of VM Entry"
      */
+    const char * const pszFailure = "VM-exit";
     PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
 
@@ -6672,12 +6680,14 @@
 
     /*
-     * Load the PAE PDPTEs from the VMCS when using EPT with PAE paging.
-     */
-    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
-    {
-        if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
-            &&  (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
-            &&  (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
-        {
+     * Load the guest's PAE PDPTEs.
+     */
+    if (iemVmxVmcsIsGuestPaePagingEnabled(pVmcs))
+    {
+        if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
+        {
+            /*
+             * With EPT, we've already validated these while checking the guest state.
+             * Just load them directly from the VMCS here.
+             */
             X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
             aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
@@ -6689,16 +6699,34 @@
                 pVCpu->cpum.GstCtx.aPaePdpes[i].u = aPaePdptes[i].u;
         }
-
-        /*
-         * Set PGM's copy of the EPT pointer.
-         * The EPTP has already been validated while checking guest state.
-         */
+        else
+        {
+            /*
+             * Without EPT, we must load the PAE PDPTEs referenced by CR3.
+             * This involves loading (and mapping) CR3 and validating them now.
+             */
+            int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
+            if (RT_SUCCESS(rc))
+            { /* likely */ }
+            else
+            {
+                iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
+            }
+        }
+    }
+
+    /*
+     * Set PGM's copy of the EPT pointer.
+     * The EPTP has already been validated while checking guest state.
+     */
+    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
         PGMSetGuestEptPtr(pVCpu, pVmcs->u64EptPtr.u);
-    }
 
     /* VPID is irrelevant. We don't support VPID yet. */
 
     /* Clear address-range monitoring. */
     EMMonitorWaitClear(pVCpu);
+
+    return VINF_SUCCESS;
 }
 
@@ -6920,8 +6948,12 @@
 
     /* Load guest non-register state (such as interrupt shadows, NMI blocking etc). */
-    iemVmxVmentryLoadGuestNonRegState(pVCpu);
+    int rc = iemVmxVmentryLoadGuestNonRegState(pVCpu, pszInstr);
+    if (rc == VINF_SUCCESS)
+    { /* likely */ }
+    else
+        return rc;
 
     /* Load VMX related structures and state referenced by the VMCS. */
-    int rc = iemVmxVmentryLoadGuestVmcsRefState(pVCpu, pszInstr);
+    rc = iemVmxVmentryLoadGuestVmcsRefState(pVCpu, pszInstr);
     if (rc == VINF_SUCCESS)
     { /* likely */ }
@@ -7433,8 +7465,17 @@
         iemVmxVmentrySaveNmiBlockingFF(pVCpu);
 
-        bool fPdpesMapped;
-        rc = iemVmxVmentryCheckGuestState(pVCpu, &fPdpesMapped, pszInstr);
+        rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
         if (RT_SUCCESS(rc))
         {
+            /*
+             * We've now entered nested-guest execution.
+             *
+             * It is important do this prior to loading the guest state because
+             * as part of loading the guest state, PGM (and perhaps other components
+             * in the future) relies on detecting whether VMX non-root mode has been
+             * entered.
+             */
+            pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
+
             rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
             if (RT_SUCCESS(rc))
@@ -7448,4 +7489,8 @@
                 if (uInstrId == VMXINSTRID_VMLAUNCH)
                     pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;
+
+                /* We would have mapped PAE PDPTEs when PAE paging is used without EPT. */
+                bool const fPdpesMapped = !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
+                                       && iemVmxVmcsIsGuestPaePagingEnabled(pVmcs);
 
                 /* Perform the VMX transition (PGM updates). */
@@ -7467,7 +7512,4 @@
                 /* Paranoia. */
                 Assert(rcStrict == VINF_SUCCESS);
-
-                /* We've now entered nested-guest execution. */
-                pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
 
                 /*
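
The PDPTE handling above leans on two PGM entry points visible in the hunks: PGMGstArePaePdpesValid for the EPT path and PGMGstMapPaePdpesAtCr3 for the non-EPT path. As background, here is a minimal sketch of what a PAE PDPTE validity check of this kind has to verify, per Intel SDM Table 4-8 (PAE paging): a PDPTE passes if it is not present, or if it is present with all reserved bits clear. The function name, signature and explicit MAXPHYADDR parameter below are hypothetical and for illustration only; the real check lives in PGM, takes the vCPU, and derives MAXPHYADDR from the guest CPU profile.

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical, simplified model of a PAE PDPTE validity check
 * (illustration only; NOT the actual PGMGstArePaePdpesValid code).
 * Per Intel SDM Table 4-8, a present PAE PDPTE must have the reserved
 * bits 2:1, 8:5 and 63:MAXPHYADDR all clear.
 */
static bool ArePaePdpesValidSketch(const uint64_t aPdptes[4], unsigned cMaxPhysAddrBits)
{
    /* Assumes cMaxPhysAddrBits < 64; real CPUs report 36..52 via CPUID. */
    uint64_t const fRsvdMask = UINT64_C(0x1e6)                      /* bits 2:1 and 8:5 */
                             | (~UINT64_C(0) << cMaxPhysAddrBits);  /* bits 63:MAXPHYADDR */
    for (unsigned i = 0; i < 4; i++)
        if (   (aPdptes[i] & UINT64_C(1))   /* PDPTE is marked present... */
            && (aPdptes[i] & fRsvdMask))    /* ...but has a reserved bit set. */
            return false;
    return true;
}

Both paths in the changeset report the same class of failure: with EPT, bad VMCS-supplied PDPTEs (u64GuestPdpte0..3) fail VM-entry directly with exit qualification VMX_ENTRY_FAIL_QUAL_PDPTE during the guest-state checks, while without EPT the equivalent error now surfaces from PGMGstMapPaePdpesAtCr3 during guest-state loading, which is precisely the split the hunks above introduce.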