Changeset 92583 in vbox
- Timestamp: Nov 24, 2021 9:13:14 AM (3 years ago)
- svn:sync-xref-src-repo-rev: 148465
- Location: trunk/src/VBox/VMM
- Files: 17 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r92553 → r92583

@@ -4356,5 +4356,5 @@
     /* Inform PGM. */
     /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
-    rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), false /* fPdpesMapped */);
+    rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), false /* fCr3Mapped */);
     AssertRCReturn(rc, rc);
     /* ignore informational status codes */
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r92568 → r92583

@@ -5870,15 +5870,15 @@
          != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
     {
-        bool fPdpesMapped;
+        bool fCr3Mapped;
         if (   enmAccessCrX != IEMACCESSCRX_MOV_CRX
             || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
             || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
-            fPdpesMapped = false;
+            fCr3Mapped = false;
         else
         {
             IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
-            fPdpesMapped = true;
+            fCr3Mapped = true;
         }
-        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fPdpesMapped);
+        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fCr3Mapped);
         AssertRCReturn(rc, rc);
         /* ignore informational status codes */

@@ -5981,15 +5981,15 @@
     if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
     {
-        bool fPdpesMapped;
+        bool fCr3Mapped;
         if (   !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
             || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
-            fPdpesMapped = false;
+            fCr3Mapped = false;
         else
         {
             Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
             IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
-            fPdpesMapped = true;
+            fCr3Mapped = true;
         }
-        rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), fPdpesMapped);
+        rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), fCr3Mapped);
         AssertRCReturn(rc, rc);
         /* ignore informational status codes */

@@ -6073,15 +6073,15 @@
     if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
     {
-        bool fPdpesMapped;
+        bool fCr3Mapped;
         if (   !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
             || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
-            fPdpesMapped = false;
+            fCr3Mapped = false;
         else
         {
             Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
             IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
-            fPdpesMapped = true;
+            fCr3Mapped = true;
         }
-        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fPdpesMapped);
+        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */, fCr3Mapped);
         AssertRCReturn(rc, rc);
         /* ignore informational status codes */
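All three hunks above follow the same pattern: the caller first decides whether CR3 (and, under PAE paging, the PDPEs/PDPT) has already been validated and mapped, then passes that verdict to PGMFlushTLB() as the renamed fCr3Mapped argument so PGM can skip re-mapping. The following stand-alone C sketch models that control flow only; every type and function in it is a simplified stand-in, not the real VMM API.

    /* Simplified model of the fCr3Mapped caller pattern above.
     * All names here are hypothetical stubs, not VirtualBox definitions. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct VCPUSTUB { bool fPaePaging; } VCPUSTUB;

    static bool stubIsPaePagingEnabled(const VCPUSTUB *pVCpu) { return pVCpu->fPaePaging; }

    /* Stand-in for IEM_MAP_PAE_PDPES_AT_CR3_RET: validate and map the PAE PDPEs at CR3. */
    static int stubMapPaePdpesAtCr3(VCPUSTUB *pVCpu, unsigned long long uCr3)
    {
        (void)pVCpu; (void)uCr3;
        return 0; /* success */
    }

    /* Stand-in for PGMFlushTLB: fCr3Mapped tells PGM it may skip re-mapping CR3/PDPEs. */
    static int stubFlushTlb(VCPUSTUB *pVCpu, unsigned long long uCr3, bool fGlobal, bool fCr3Mapped)
    {
        (void)pVCpu;
        printf("flush: cr3=%#llx global=%d cr3-already-mapped=%d\n", uCr3, fGlobal, fCr3Mapped);
        return 0;
    }

    static int loadCr3(VCPUSTUB *pVCpu, unsigned long long uNewCr3)
    {
        bool fCr3Mapped;
        if (!stubIsPaePagingEnabled(pVCpu))
            fCr3Mapped = false;                          /* PGM maps CR3 itself during the flush. */
        else
        {
            int rc = stubMapPaePdpesAtCr3(pVCpu, uNewCr3); /* map (and validate) first... */
            if (rc != 0)
                return rc;
            fCr3Mapped = true;                           /* ...then tell PGM it is already done. */
        }
        return stubFlushTlb(pVCpu, uNewCr3, true /* fGlobal */, fCr3Mapped);
    }

    int main(void)
    {
        VCPUSTUB Vcpu = { true };
        return loadCr3(&Vcpu, 0x1000ULL);
    }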
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
r92546 → r92583

@@ -82,8 +82,9 @@
  *
  * @returns Strict VBox status code.
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
- */
-DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPUCC pVCpu, bool fPdpesMapped)
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   fCr3Mapped  Whether CR3 (and in case of PAE paging, whether PDPEs
+ *                      and PDPT) have been mapped.
+ */
+DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPUCC pVCpu, bool fCr3Mapped)
 {
     /*

@@ -105,5 +106,5 @@
     if (rc == VINF_SUCCESS)
     {
-        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fPdpesMapped);
+        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fCr3Mapped);
         AssertRCReturn(rc, rc);
     }

@@ -723,5 +724,5 @@
      * Invalid PAE PDPEs here causes a #VMEXIT.
      */
-    bool fPdpesMapped;
+    bool fCr3Mapped;
     if (   !pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
         && CPUMIsPaePagingEnabled(pVmcbNstGst->u64CR0, pVmcbNstGst->u64CR4, uValidEfer))

@@ -729,5 +730,5 @@
         rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcbNstGst->u64CR3);
         if (RT_SUCCESS(rc))
-            fPdpesMapped = true;
+            fCr3Mapped = true;
         else
         {

@@ -737,5 +738,5 @@
     }
     else
-        fPdpesMapped = false;
+        fCr3Mapped = false;
 
     /*

@@ -777,5 +778,5 @@
      * Update PGM, IEM and others of a world-switch.
      */
-    VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu, fPdpesMapped);
+    VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu, fCr3Mapped);
     if (rcStrict == VINF_SUCCESS)
     { /* likely */ }
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r92581 → r92583

@@ -153,6 +153,6 @@
 } while (0)
 
-/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
-# define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
+/** Marks a VM-exit failure with a diagnostic reason and logs. */
+# define IEM_VMX_VMEXIT_FAILED(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
     do \
     { \

@@ -160,4 +160,11 @@
                  HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
         (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
+    } while (0)
+
+/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
+# define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
+    do \
+    { \
+        IEM_VMX_VMEXIT_FAILED(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag); \
         return VERR_VMX_VMEXIT_FAILED; \
     } while (0)

@@ -1230,8 +1237,9 @@
  * Performs the VMX transition to/from VMX non-root mode.
  *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   fPdpesMapped    Whether the PAE PDPTEs (and PDPT) have been mapped.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   fCr3Mapped  Whether CR3 (and in case of PAE paging, whether PDPTEs and
+ *                      PDPT) have been mapped.
  */
-IEM_STATIC int iemVmxTransition(PVMCPUCC pVCpu, bool fPdpesMapped)
+IEM_STATIC int iemVmxTransition(PVMCPUCC pVCpu, bool fCr3Mapped)
 {
     /*

@@ -1253,5 +1261,5 @@
     if (rc == VINF_SUCCESS)
     {
-        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fPdpesMapped);
+        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* fGlobal */, fCr3Mapped);
         AssertRCReturn(rc, rc);
     }

@@ -1888,23 +1896,3 @@
 
 /**
- * Checks the host PAE PDPTEs assuming we are switching to a PAE mode host.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   uExitReason     The VMX instruction name (for logging purposes).
- *
- * @remarks Caller must ensure the preconditions are met before calling this
- *          function as failure here will trigger VMX aborts!
- */
-IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPUCC pVCpu, uint32_t uExitReason)
-{
-    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
-    const char * const pszFailure = "VMX-abort";
-    int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64HostCr3.u);
-    if (RT_SUCCESS(rc))
-        return rc;
-    IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpte);
-}
-
-
-/**
  * Loads the host MSRs from the VM-exit MSR-load area as part of VM-exit.

@@ -2019,5 +2007,5 @@
      * See Intel spec. 26.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
      */
-    bool fPdpesMapped;
+    bool fCr3Mapped;
     if (   (pVmcs->u64HostCr4.u & X86_CR4_PAE)
         && !fHostInLongMode

@@ -2025,14 +2013,16 @@
            || pVmcs->u64HostCr3.u != pVCpu->cpum.GstCtx.cr3))
     {
-        int const rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
-        if (RT_FAILURE(rc))
-        {
-            Log(("VM-exit attempting to load invalid PDPTEs -> VMX-Abort\n"));
+        int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64HostCr3.u);
+        if (RT_SUCCESS(rc))
+        { /* likely*/ }
+        else
+        {
+            IEM_VMX_VMEXIT_FAILED(pVCpu, uExitReason, "VMX-abort", kVmxVDiag_Vmexit_HostPdpte);
             return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
         }
-        fPdpesMapped = true;
+        fCr3Mapped = true;
     }
     else
-        fPdpesMapped = false;
+        fCr3Mapped = false;
 
     iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);

@@ -2051,5 +2041,5 @@
 
     /* Perform the VMX transition (PGM updates). */
-    VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fPdpesMapped);
+    VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fCr3Mapped);
     if (rcStrict == VINF_SUCCESS)
     { /* likely */ }

@@ -5672,5 +5662,3 @@
  * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pfPdpesMapped   Where to store whether PAE PDPTEs (and PDPT) have been
- *                          mapped as part of checking guest state.
  * @param   pszInstr        The VMX instruction name (for logging purposes).

@@ -7494,10 +7482,10 @@
         pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;
 
-        /* We would have mapped PAE PDPTEs when PAE paging is used without EPT. */
-        bool const fPdpesMapped = !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
+        /* When EPT isn't used, we would have validated and mapped CR3 and PDPTEs when PAE paging is enabled. */
+        bool const fCr3Mapped = !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
                                && iemVmxVmcsIsGuestPaePagingEnabled(pVmcs);
 
         /* Perform the VMX transition (PGM updates). */
-        VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fPdpesMapped);
+        VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu, fCr3Mapped);
         if (rcStrict == VINF_SUCCESS)
         { /* likely */ }

@@ -8498,5 +8486,5 @@
             /* Invalidate mappings for the linear address tagged with VPID. */
             /** @todo PGM support for VPID? Currently just flush everything. */
-            PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
+            PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fCr3Mapped */);
             iemVmxVmSucceed(pVCpu);
         }

@@ -8525,5 +8513,5 @@
             /* Invalidate all mappings with VPID. */
             /** @todo PGM support for VPID? Currently just flush everything. */
-            PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
+            PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fCr3Mapped */);
             iemVmxVmSucceed(pVCpu);
         }

@@ -8542,5 +8530,5 @@
             /* Invalidate all mappings with non-zero VPIDs. */
             /** @todo PGM support for VPID? Currently just flush everything. */
-            PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
+            PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fCr3Mapped */);
             iemVmxVmSucceed(pVCpu);
             break;

@@ -8553,5 +8541,5 @@
             /* Invalidate all mappings with VPID except global translations. */
             /** @todo PGM support for VPID? Currently just flush everything. */
-            PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fPdpesMapped */);
+            PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */, false /* fCr3Mapped */);
             iemVmxVmSucceed(pVCpu);
         }
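One structural change in this file goes beyond the rename: IEM_VMX_VMEXIT_FAILED_RET is split so that the new IEM_VMX_VMEXIT_FAILED records and logs the diagnostic without returning, which lets the host-PDPTE failure path above log kVmxVDiag_Vmexit_HostPdpte and then fall through to iemVmxAbort() instead of returning VERR_VMX_VMEXIT_FAILED. A minimal stand-alone illustration of the same log-then-choose-your-exit macro layering follows; the names and values are generic placeholders, not the VBox definitions.

    /* Generic illustration of splitting a "log + return" macro so a caller
     * can log a failure and then take a different exit path. */
    #include <stdio.h>

    #define ERR_CHECK_FAILED  (-1)
    #define ERR_ABORTED       (-2)

    /* Records/logs a failure but leaves control flow to the caller. */
    #define REPORT_FAILED(a_iDiag) \
        do { printf("failure: diag=%d\n", (a_iDiag)); } while (0)

    /* The original convenience form: report, then return an error code. */
    #define REPORT_FAILED_RET(a_iDiag) \
        do { REPORT_FAILED(a_iDiag); return ERR_CHECK_FAILED; } while (0)

    static int takeAbortPath(void)
    {
        printf("taking the abort path instead of the normal error return\n");
        return ERR_ABORTED;
    }

    static int checkCommon(int fOk)
    {
        if (!fOk)
            REPORT_FAILED_RET(1);   /* usual pattern: log and bail out */
        return 0;
    }

    static int checkWithAbort(int fOk)
    {
        if (!fOk)
        {
            REPORT_FAILED(2);       /* log only... */
            return takeAbortPath(); /* ...then pick a different exit */
        }
        return 0;
    }

    int main(void)
    {
        checkCommon(1);
        return checkWithAbort(0) == ERR_ABORTED ? 0 : 1;
    }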
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r92541 → r92583

@@ -1117,5 +1117,5 @@
     if (fUpdateCr3)
     {
-        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
+        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fCr3Mapped*/);
         if (rc == VINF_SUCCESS)
         { /* likely */ }
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r92547 → r92583

@@ -2381,7 +2381,8 @@
  * @param   cr3         The new cr3.
  * @param   fGlobal     Indicates whether this is a global flush or not.
- * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
- */
-VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal, bool fPdpesMapped)
+ * @param   fCr3Mapped  Whether CR3 (and in case of PAE paging, whether PDPEs
+ *                      and PDPT) has been mapped.
+ */
+VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal, bool fCr3Mapped)
 {
     STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);

@@ -2405,5 +2406,5 @@
     RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-    if (   !fPdpesMapped
+    if (   !fCr3Mapped
         && CPUMIsGuestVmxEptPagingEnabled(pVCpu))
     {

@@ -2428,5 +2429,5 @@
 
         pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
-        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
+        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fCr3Mapped);
         if (RT_LIKELY(rc == VINF_SUCCESS))
         { }

@@ -2489,7 +2490,8 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cr3         The new CR3.
- * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
- */
-VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3, bool fPdpesMapped)
+ * @param   fCr3Mapped  Whether CR3 (and in case of PAE paging, whether PDPEs
+ *                      and PDPT) has been mapped.
+ */
+VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3, bool fCr3Mapped)
 {
     VMCPU_ASSERT_EMT(pVCpu);

@@ -2504,5 +2506,5 @@
     RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-    if (   !fPdpesMapped
+    if (   !fCr3Mapped
         && CPUMIsGuestVmxEptPagingEnabled(pVCpu))
     {

@@ -2527,5 +2529,5 @@
 
         pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
-        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
+        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fCr3Mapped);
 
         AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */

@@ -2610,4 +2612,4 @@
         AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
         pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
-        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
+        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fCr3Mapped */);
     }
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r92561 → r92583

@@ -53,5 +53,5 @@
 PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
 #endif
-PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped);
+PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fCr3Mapped);
 PGM_BTH_DECL(int, UnmapCR3)(PVMCPUCC pVCpu);
 

@@ -4131,7 +4131,8 @@
  * @param   GCPhysCR3   The physical address in the CR3 register. (A20 mask
  *                      already applied.)
- * @param   fPdpesMapped    Whether the PAE PDPEs (and PDPT) have been mapped.
+ * @param   fCr3Mapped  Whether CR3 (and in case of PAE paging, whether PDPEs
+ *                      and PDPT) has been mapped.
  */
-PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped)
+PGM_BTH_DECL(int, MapCR3)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fCr3Mapped)
 {
     PVMCC pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

@@ -4147,7 +4148,7 @@
 
 # if PGM_GST_TYPE == PGM_TYPE_PAE
-    if (!fPdpesMapped)
+    if (!fCr3Mapped)
 # else
-    NOREF(fPdpesMapped);
+    NOREF(fCr3Mapped);
 #endif
     {

@@ -4204,5 +4205,5 @@
     }
 #else /* prot/real stub */
-    NOREF(fPdpesMapped);
+    NOREF(fCr3Mapped);
 #endif
 
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r92459 → r92583

@@ -52,5 +52,5 @@
     AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
     AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
-    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
+    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fCr3Mapped */);
 }
 
trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
r92498 → r92583

@@ -5585,5 +5585,5 @@
     {
         Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
-        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
+        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fCr3Mapped */);
         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     }
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r92495 → r92583

@@ -2925,5 +2925,5 @@
         {
             AssertMsg(pCtx->cr3 == pVmcbGuest->u64CR3, ("cr3=%#RX64 vmcb_cr3=%#RX64\n", pCtx->cr3, pVmcbGuest->u64CR3));
-            PGMUpdateCR3(pVCpu, pCtx->cr3, false /* fPdpesMapped */);
+            PGMUpdateCR3(pVCpu, pCtx->cr3, false /* fCr3Mapped */);
         }
     }
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r92495 → r92583

@@ -8058,5 +8058,5 @@
     {
         Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
-        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
+        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fCr3Mapped */);
         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     }
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r92541 → r92583

@@ -2781,5 +2781,5 @@
     {
         LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
-        rc = PGMUpdateCR3(pGVCpu, pCtx->cr3, false /*fPdpesMapped*/);
+        rc = PGMUpdateCR3(pGVCpu, pCtx->cr3, false /*fCr3Mapped*/);
         if (rc == VINF_SUCCESS)
         { /* likely */ }
trunk/src/VBox/VMM/VMMR3/EM.cpp
r92520 → r92583

@@ -1457,5 +1457,5 @@
     {
         CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
-        int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
+        int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fCr3Mapped */);
         if (RT_FAILURE(rc2))
             return rc2;
trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp
r92560 → r92583

@@ -1125,5 +1125,5 @@
     if (fUpdateCr3)
     {
-        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
+        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fCr3Mapped*/);
         if (rc == VINF_SUCCESS)
         { /* likely */ }
trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp
r92579 → r92583

@@ -1566,5 +1566,5 @@
     if (fUpdateCr3)
     {
-        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
+        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fCr3Mapped*/);
         if (rc == VINF_SUCCESS)
         { /* likely */ }
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
r92468 → r92583

@@ -1896,5 +1896,5 @@
     {
         LogFlow(("nemR3NativeRunGC: calling PGMFlushTLB...\n"));
-        int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /*fGlobal*/, false /*fPdpesMapped*/);
+        int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /*fGlobal*/, false /*fCr3Mapped*/);
         AssertRCReturn(rc, rc);
         if (rcStrict == VINF_NEM_FLUSH_TLB)
trunk/src/VBox/VMM/include/PGMInternal.h
r92480 → r92583

@@ -2692,5 +2692,5 @@
     DECLCALLBACKMEMBER(int, pfnPrefetchPage,(PVMCPUCC pVCpu, RTGCPTR GCPtrPage));
     DECLCALLBACKMEMBER(int, pfnVerifyAccessSyncPage,(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
-    DECLCALLBACKMEMBER(int, pfnMapCR3,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fPdpesMapped));
+    DECLCALLBACKMEMBER(int, pfnMapCR3,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3, bool fCr3Mapped));
     DECLCALLBACKMEMBER(int, pfnUnmapCR3,(PVMCPUCC pVCpu));
     DECLCALLBACKMEMBER(int, pfnEnter,(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3));