Changeset 91580 in vbox for trunk/src/VBox
- Timestamp: Oct 6, 2021 7:22:04 AM (3 years ago)
- Location: trunk/src/VBox/VMM
- Files: 17 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r91305 → r91580)

- CPUMSetGuestPaePdpes: the doxygen comment now says "PAE PDPEs" instead of "PAE PDPTEs", both in the brief and in the @param paPaePdpes description.
- Added a matching getter:

    /**
     * Gets the PAE PDPTEs for the guest.
     *
     * @param pVCpu       The cross context virtual CPU structure of the calling thread.
     * @param paPaePdpes  Where to store the PAE PDPEs.
     */
    VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes)
    {
        Assert(paPaePdpes);
        CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR3);
        for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
            paPaePdpes[i].u = pVCpu->cpum.s.Guest.aPaePdpes[i].u;
    }
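For orientation, here is a minimal, hypothetical sketch of how the new getter pairs with the existing setter from a caller's point of view; the helper name and the save/restore scenario are illustrative assumptions, not part of the changeset.

    /* Hypothetical helper: snapshot the guest PAE PDPEs and restore them later.
       CPUMGetGuestPaePdpes/CPUMSetGuestPaePdpes are the accessors shown in this changeset. */
    static void cpumDemoSaveRestorePaePdpes(PVMCPU pVCpu)
    {
        X86PDPE aPdpes[4];                          /* one entry per PAE page-directory pointer */
        CPUMGetGuestPaePdpes(pVCpu, &aPdpes[0]);    /* copy out of the guest CPU context */
        /* ... code that may clobber the cached PDPEs runs here ... */
        CPUMSetGuestPaePdpes(pVCpu, &aPdpes[0]);    /* write them back */
    }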
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
(r91360 → r91580)

In the VMX virtual-diagnostics description table, the five per-PDPTE VM-entry entries (kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys and kVmxVDiag_Vmentry_GuestPdpte0Rsvd through ...Pdpte3Rsvd) were collapsed into a single entry:

    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte , "GuestPdpteRsvd" ),

and likewise the five VM-exit host entries (kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys and kVmxVDiag_Vmexit_HostPdpte0Rsvd through ...Pdpte3Rsvd) became:

    VMXV_DIAG_DESC(kVmxVDiag_Vmexit_HostPdpte , "HostPdpte" ),
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
(r91281 → r91580)

In the CR3-load path that informs PGM, a todo was added and PGMFlushTLB gained its new parameter:

    /* Inform PGM. */
    /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
    rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), false /* fPdpesMapped */);
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
(r91297 → r91580)

- The existing PGMFlushTLB call in the CR0.PE transition path now also passes false /* fPdpesMapped */.

- New helper macro for 'mov CRx,GReg', used to validate and map the PAE PDPEs referenced by a CR3 value:

    /**
     * Helper for mapping CR3 and PAE PDPEs for 'mov CRx,GReg'.
     */
    #define IEM_MAP_PAE_PDPES_AT_CR3_RET(a_pVCpu, a_iCrReg, a_uCr3) \
        do \
        { \
            int const rcX = PGMGstMapPaePdpesAtCr3(a_pVCpu, a_uCr3); \
            if (RT_SUCCESS(rcX)) \
            { /* likely */ } \
            else \
            { \
                if (rcX == VERR_PGM_PAE_PDPE_RSVD) \
                { \
                    Log(("iemCImpl_load_Cr%#x: Trying to load invalid PAE PDPEs\n", a_iCrReg)); \
                    return iemRaiseGeneralProtectionFault0(a_pVCpu); \
                } \
                Log(("iemCImpl_load_Cr%#x: PGMGstReadPaePdpesAtCr3 failed %Rrc\n", a_iCrReg, rcX)); \
                return rcX; \
            } \
        } while (0)

- CR0 loads: the "@todo check reserved PDPTR bits as AMD states" marker was dropped. The new EFER value (EFER.LMA toggling) is now computed into a local NewEFER before PGM is informed, and the TLB flush happens before CPUMSetGuestCR0 instead of after it. When paging-related CR0 bits change, the PAE PDPEs are mapped first unless this isn't a MOV CRx access, PAE paging isn't enabled by the new state, or the guest is in SVM nested-guest mode:

        bool fPdpesMapped;
        if (   enmAccessCrX != IEMACCESSCRX_MOV_CRX
            || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
            || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
            fPdpesMapped = false;
        else
        {
            IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
            fPdpesMapped = true;
        }
        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fPdpesMapped);

- CR3 loads: the "@todo If we're in PAE mode we should check the PDPTRs for invalid bits" marker was replaced by real handling. PGM is now informed before the new CR3 value is committed: if CR0.PG is set, the PDPEs at the new CR3 are mapped via IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX) when the guest is in PAE mode and not in SVM nested-guest mode, and PGMFlushTLB is called with the resulting fPdpesMapped.

- CR4 loads: the PGM notification was moved ahead of CPUMSetGuestCR4 and follows the same fPdpesMapped pattern (mapping at the current CR3 when the new CR4 value results in PAE paging and the guest is not in SVM nested-guest mode); the old "Notify SELM and PGM" block, including the VME-change log statement, was removed.

- iemCImpl_invpcid: all four PGMFlushTLB calls now pass false /* fPdpesMapped */.
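As a compressed, hypothetical sketch of the macro's control flow: on invalid PDPEs the macro raises #GP(0) and returns from the enclosing function, so only the success path reaches the TLB flush. The wrapper name is illustrative; the real handler described above informs PGM before committing the new CR3 into the guest context and handles the non-PAE and SVM nested-guest cases separately.

    static VBOXSTRICTRC iemDemoLoadCr3(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t uNewCr3)
    {
        bool fPdpesMapped = false;
        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
            && CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu)))
        {
            /* Maps and validates the PDPEs; raises #GP(0) and returns on VERR_PGM_PAE_PDPE_RSVD. */
            IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCr3);
            fPdpesMapped = true;
        }
        int rc = PGMFlushTLB(pVCpu, uNewCr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), fPdpesMapped);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }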
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
(r91291 → r91580)

- iemSvmWorldSwitch now takes a bool fPdpesMapped ("Whether the PAE PDPEs (and PDPT) have been mapped") and forwards it to PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fPdpesMapped).

- iemSvmVmexit: before reloading the host state, the host PAE PDPEs are now validated and mapped when the host will be in PAE mode; any invalid PDPE shuts the VCPU down:

        /*
         * If we are switching to PAE mode host, validate the PDPEs first.
         * Any invalid PDPEs here causes a VCPU shutdown.
         */
        PCSVMHOSTSTATE pHostState = &pVCpu->cpum.GstCtx.hwvirt.svm.HostState;
        bool const fHostInPaeMode = CPUMIsPaePagingEnabled(pHostState->uCr0, pHostState->uCr4, pHostState->uEferMsr);
        if (fHostInPaeMode)
            rcStrict = PGMGstMapPaePdpesAtCr3(pVCpu, pHostState->uCr3);

  On success, CPUMSvmVmExitRestoreHostState and iemSvmWorldSwitch(pVCpu, fHostInPaeMode) run as before; on failure the code logs "PAE PDPEs invalid while restoring host state" and returns VINF_EM_TRIPLE_FAULT. The VMCB mapping-failure path likewise now returns VINF_EM_TRIPLE_FAULT instead of VERR_SVM_VMEXIT_FAILED.

- iemSvmVmrun: when nested paging is disabled and the nested-guest will use PAE paging, the PDPEs referenced by the VMCB CR3 are validated and mapped up front; invalid PDPEs cause a #VMEXIT:

        bool fPdpesMapped;
        if (   !pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
            && CPUMIsPaePagingEnabled(pVmcbNstGst->u64CR0, pVmcbNstGst->u64CR4, uValidEfer))
        {
            rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcbNstGst->u64CR3);
            if (RT_SUCCESS(rc))
                fPdpesMapped = true;
            else
            {
                Log(("iemSvmVmrun: PAE PDPEs invalid -> #VMEXIT\n"));
                return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }
        }
        else
            fPdpesMapped = false;

  and the world switch becomes iemSvmWorldSwitch(pVCpu, fPdpesMapped).
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
(r91427 → r91580)

- Removed the per-PDPTE diagnostic helpers iemVmxGetDiagVmentryPdpteRsvd and iemVmxGetDiagVmexitPdpteRsvd (the corresponding per-entry kVmxVDiag_* values are gone, see HMVMXAll.cpp above).

- iemVmxWorldSwitch was renamed to iemVmxTransition ("Performs the VMX transition to/from VMX non-root mode"); it takes a bool fPdpesMapped which is forwarded to PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fPdpesMapped). All log strings and call sites were updated for the new name.

- iemVmxVmexitCheckHostPdptes now assumes the caller has already established that the host will be in PAE mode and simply does:

        PCVMXVVMCS const    pVmcs      = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
        const char * const  pszFailure = "VMX-abort";
        int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64HostCr3.u);
        if (RT_SUCCESS(rc))
            return rc;
        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpte);

  replacing the old code that read the PDPTEs with PGMPhysSimpleReadGCPhys and checked them with CPUMArePaePdpesValid.

- VM-exit host-state loading: the host PAE PDPTE check (Intel spec. 26.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries") now runs before loading the host control registers, and only when host CR4.PAE is set, the host is not in long mode, and the guest isn't already in PAE mode with the same CR3. A failure logs "VM-exit attempting to load invalid PDPTEs -> VMX-Abort" and calls iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE). The resulting fPdpesMapped is passed to iemVmxTransition, and the old post-transition PDPTE re-check (with its "@todo r=ramshankar: I don't know if PGM does this for us already or not..." note) was removed.

- iemVmxVmentryCheckGuestPdptes gained a bool *pfPdpesMapped out parameter. With EPT enabled, the four PDPTEs are taken from the VMCS (u64GuestPdpte0..3) and checked with PGMGstArePaePdpesValid; without EPT they are validated and mapped via PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u), which also sets *pfPdpesMapped. Either failure sets the VMX_ENTRY_FAIL_QUAL_PDPTE exit qualification and reports the single kVmxVDiag_Vmentry_GuestPdpte diagnostic.

- iemVmxVmentryCheckGuestState threads the new pfPdpesMapped parameter through, and the VMLAUNCH/VMRESUME path passes the result to iemVmxTransition(pVCpu, fPdpesMapped).

- All four INVVPID flush paths now call PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */).
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
(r91281 → r91580)

- The VERR_NEM_FLUSH_TLB handling now calls PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/, false /*fPdpesMapped*/) and returns its status.
- The fUpdateCr3 path calls PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/), with the AssertMsgReturn rewritten as an if (rc == VINF_SUCCESS) { /* likely */ } else AssertMsgFailedReturn(...) sequence.
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
(r91345 → r91580)

- New reserved-bits check for guest PAE PDPEs:

    /**
     * Checks whether the given PAE PDPEs are potentially valid for the guest.
     *
     * @returns @c true if the PDPE is valid, @c false otherwise.
     * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     * @param   paPaePdpes  The PAE PDPEs to validate.
     *
     * @remarks This function -only- checks the reserved bits in the PDPE entries.
     */
    VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
    {
        Assert(paPaePdpes);
        for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
        {
            X86PDPE const PaePdpe = paPaePdpes[i];
            if (   !(PaePdpe.u & X86_PDPE_P)
                || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
            { /* likely */ }
            else
                return false;
        }
        return true;
    }

- pgmGstUpdatePaePdpes was renamed pgmGstFlushPaePdpes (it invalidates the cached aGCPhysGstPaePDs entries).

- New internal helper pgmGetGuestMaskedCr3 replaces the three copies of the mode-dependent CR3 masking switch (PAE, AMD64, 32-bit/other) plus PGM_A20_APPLY_TO_VAR that previously lived in PGMFlushTLB, PGMUpdateCR3 and PGMSyncCR3.

- PGMFlushTLB and PGMUpdateCR3 gained a bool fPdpesMapped parameter ("Whether the PAE PDPEs (and PDPT) have been mapped") which is forwarded to the mode-specific pfnMapCR3 callback; both now use pgmGetGuestMaskedCr3, and their "Update PAE PDPTEs" step was renamed "Flush PAE PDPTEs" (pgmGstFlushPaePdpes). PGMSyncCR3 also uses the new helper and passes false /* fPdpesMapped */ to pfnMapCR3.

- New API PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes): maps the four PAE page directories referenced by the given PDPEs into apGstPaePDsR3/R0 and aGCPhysGstPaePDs, clearing the slots for entries that are not present or cannot be mapped. Its remarks note that validation of the PDPEs may be deferred (e.g. SVM with nested paging, see AMD spec. 15.25.10 "Legacy PAE Mode"), so the caller is assumed to have validated them when required, and that it may run while the guest is still switching to PAE paging, i.e. before CR0/CR4 reflect it.

- New API PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3): reads the PDPT at the given CR3, validates the PDPEs with PGMGstArePaePdpesValid, maps the PDPT into pGstPaePdptR3/R0, updates CPUM via CPUMSetGuestPaePdpes and then maps the page directories via PGMGstMapPaePdpes; it returns VERR_PGM_PAE_PDPE_RSVD if any PDPE has reserved bits set. It deliberately does not update PGM's GCPhysCR3, leaving that to the PGMFlushTLB call expected to follow.
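As a usage illustration, here is a minimal, hypothetical caller-side sketch of the pattern the new APIs enable (validate and map the PDPEs first, then tell PGMFlushTLB they are already mapped); the function name is an assumption and the error handling is simplified compared to the IEM call sites above.

    /* Hypothetical sketch: load a new CR3 for a PAE guest using the new PGM APIs. */
    static int pgmDemoLoadPaeCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, bool fGlobal)
    {
        /* Read the PDPT at uNewCr3, check reserved bits, map the PDPT and the page directories. */
        int rc = PGMGstMapPaePdpesAtCr3(pVCpu, uNewCr3);
        if (RT_FAILURE(rc))
            return rc;  /* VERR_PGM_PAE_PDPE_RSVD: caller raises #GP(0), #VMEXIT or a VMX abort as appropriate. */

        /* The PDPEs are already mapped, so the PAE MapCR3 backend can skip doing it again. */
        return PGMFlushTLB(pVCpu, uNewCr3, fGlobal, true /* fPdpesMapped */);
    }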
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
(r91271 → r91580)

- The template MapCR3 function (and its forward declaration) now takes bool fPdpesMapped ("Whether the PAE PDPEs (and PDPT) have been mapped"), and rc is initialised to VINF_SUCCESS at the top of the function; the prot/real stub only NOREFs the new parameter.

- For PAE guests the whole "map the page CR3 points at" block is skipped when fPdpesMapped is true. When mapping is performed, the error path now releases the PGM lock (AssertReturnStmt(pPageCR3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR)), and the hand-rolled loop that mapped the four PAE page directories was replaced by:

        /*
         * Update CPUM and map the 4 PDs too.
         */
        X86PDPE aGstPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
        memcpy(&aGstPaePdpes, HCPtrGuestCR3, sizeof(aGstPaePdpes));
        CPUMSetGuestPaePdpes(pVCpu, &aGstPaePdpes[0]);
        PGMGstMapPaePdpes(pVCpu, &aGstPaePdpes[0]);

- In the shadow-CR3 handling further down, the pgmPoolAlloc and pgmMapActivateCR3 results are kept in local rc2/rc3 variables instead of overwriting the function's rc.
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
(r86487 → r91580)

The single pfnMapCR3 dispatch in this template now passes the extra argument:

    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r91308 → r91580)

Both PGMUpdateCR3 calls, in the guest-state import path and in the path that services a pending VMCPU_FF_HM_UPDATE_CR3 (e.g. after a longjump), now pass false /* fPdpesMapped */:

    PGMUpdateCR3(pVCpu, pCtx->cr3, false /* fPdpesMapped */);
    PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r91358 → r91580)

- The CR3-import path now calls PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */), and the Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) was moved inside the block that performs the update.
- The second call site, where a pending VMCPU_FF_HM_UPDATE_CR3 is serviced, likewise passes false /* fPdpesMapped */ to PGMUpdateCR3.
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
(r91323 → r91580)

nemR0WinImportState now calls PGMUpdateCR3(pGVCpu, pCtx->cr3, false /*fPdpesMapped*/), with the AssertMsgReturn on the result rewritten as an if (rc == VINF_SUCCESS) { /* likely */ } else AssertMsgFailedReturn(...) sequence.
trunk/src/VBox/VMM/VMMR3/EM.cpp
(r91271 → r91580)

The PGMUpdateCR3 call in the VMCPU_FF_HM_UPDATE_CR3 handling becomes:

    int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
(r88654 → r91580)

The TLB flush requested from nemR3NativeRunGC now reads:

    int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /*fGlobal*/, false /*fPdpesMapped*/);
trunk/src/VBox/VMM/VMMR3/PGM.cpp
(r91271 → r91580)

Before invoking pfnUnmapCR3, the guard now checks the pfnUnmapCR3 member instead of pfnMapCR3:

    if (   idxBth < RT_ELEMENTS(g_aPgmBothModeData)
        && g_aPgmBothModeData[idxBth].pfnUnmapCR3)
    {
        rc = g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
trunk/src/VBox/VMM/include/PGMInternal.h
(r91450 → r91580)

The pfnMapCR3 member of the guest+shadow mode data table gained the new parameter:

    DECLCALLBACKMEMBER(int, pfnMapCR3,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped));
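To show how the widened callback is consumed, here is a condensed, hypothetical wrapper modeled on the dispatch sites in PGMAll.cpp and PGMAllGst.h above; the wrapper name is an assumption and error handling is trimmed.

    /* Hypothetical wrapper around the mode-dispatch table, mirroring the call sites in PGMAll.cpp/PGMAllGst.h. */
    static int pgmDemoCallMapCR3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped)
    {
        uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
        AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
        AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
        /* fPdpesMapped tells the PAE backend that the PDPT/PDPEs are already mapped and validated. */
        return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
    }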