Changeset 96879 in vbox for trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
- Timestamp: Sep 26, 2022 5:43:43 PM (2 years ago)
- File: 1 edited
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
--- trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r96407)
+++ trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r96879)

@@ -66 +66 @@
 static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
                           PPGMPTWALKGST pGstWalk);
-static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
+static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
+                              PPGMPTWALKGST pGstWalk);
 static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
+static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
+                                   PPGMPTWALKGST pGstWalkAll);
 #endif
 static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);

@@ -74 +77 @@

 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-/* Guest - EPT SLAT is identical for all guest paging mode. */
 # define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
-# define PGM_GST_TYPE  PGM_TYPE_EPT
-# include "PGMGstDefs.h"
+# include "PGMSlatDefs.h"
 # include "PGMAllGstSlatEpt.cpp.h"
-# undef PGM_GST_TYPE
+# undef PGM_SLAT_TYPE
 #endif

@@ -899 +900 @@
 #undef PGMMODEDATABTH_NULL_ENTRY
 };
+
+
+/**
+ * Gets the CR3 mask corresponding to the given paging mode.
+ *
+ * @returns The CR3 mask.
+ * @param   enmMode         The paging mode.
+ * @param   enmSlatMode     The second-level address translation mode.
+ */
+DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode, PGMSLAT enmSlatMode)
+{
+    /** @todo This work can be optimized either by storing the masks in
+     *        pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and
+     *        store the result when entering guest mode since we currently use it only
+     *        for enmGuestMode. */
+    if (enmSlatMode == PGMSLAT_DIRECT)
+    {
+        Assert(enmMode != PGMMODE_EPT);
+        switch (enmMode)
+        {
+            case PGMMODE_PAE:
+            case PGMMODE_PAE_NX:
+                return X86_CR3_PAE_PAGE_MASK;
+            case PGMMODE_AMD64:
+            case PGMMODE_AMD64_NX:
+                return X86_CR3_AMD64_PAGE_MASK;
+            default:
+                return X86_CR3_PAGE_MASK;
+        }
+    }
+    else
+    {
+        Assert(enmSlatMode == PGMSLAT_EPT);
+        return X86_CR3_EPT_PAGE_MASK;
+    }
+}
+
+
+/**
+ * Gets the masked CR3 value according to the current guest paging mode.
+ *
+ * @returns The masked PGM CR3 value.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   uCr3    The raw guest CR3 value.
+ */
+DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
+{
+    uint64_t const fCr3Mask  = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode, pVCpu->pgm.s.enmGuestSlatMode);
+    RTGCPHYS       GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
+    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
+    return GCPhysCR3;
+}


@@ -1670 +1723 @@


+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/**
+ * Syncs the SHADOW nested-guest page directory pointer for the specified address.
+ * Allocates backing pages in case the PDPT or PML4 entry is missing.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   GCPhysNested    The nested-guest physical address.
+ * @param   ppPdpt          Where to store the PDPT. Optional, can be NULL.
+ * @param   ppPD            Where to store the PD. Optional, can be NULL.
+ * @param   pGstWalkAll     The guest walk info.
+ */
+static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
+                                   PPGMPTWALKGST pGstWalkAll)
+{
+    PVMCC    pVM   = pVCpu->CTX_SUFF(pVM);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+    int      rc;
+
+    PPGMPOOLPAGE pShwPage;
+    Assert(pVM->pgm.s.fNestedPaging);
+    Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
+    PGM_LOCK_ASSERT_OWNER(pVM);
+
+    /*
+     * PML4 level.
+     */
+    {
+        PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
+        Assert(pPml4);
+
+        /* Allocate page directory pointer table if not present. */
+        {
+            uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pml4e.u & pVCpu->pgm.s.fGstEptShadowedPml4eMask;
+            const unsigned iPml4e    = (GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
+            PEPTPML4E      pPml4e    = &pPml4->a[iPml4e];
+
+            if (!(pPml4e->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
+            {
+                RTGCPHYS const GCPhysPdpt = pGstWalkAll->u.Ept.Pml4e.u & EPT_PML4E_PG_MASK;
+                rc = pgmPoolAlloc(pVM, GCPhysPdpt, PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT, PGMPOOLACCESS_DONTCARE,
+                                  PGM_A20_IS_ENABLED(pVCpu), pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4e, false /*fLockPage*/,
+                                  &pShwPage);
+                AssertRCReturn(rc, rc);
+
+                /* Hook up the new PDPT now. */
+                ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
+            }
+            else
+            {
+                pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
+                AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
+
+                pgmPoolCacheUsed(pPool, pShwPage);
+
+                /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
+                if (pPml4e->u != (pShwPage->Core.Key | fShwFlags))
+                    ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
+            }
+            Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
+            Log7Func(("GstPml4e=%RX64 ShwPml4e=%RX64 iPml4e=%u\n", pGstWalkAll->u.Ept.Pml4e.u, pPml4e->u, iPml4e));
+        }
+    }
+
+    /*
+     * PDPT level.
+     */
+    {
+        AssertReturn(!(pGstWalkAll->u.Ept.Pdpte.u & EPT_E_LEAF), VERR_NOT_SUPPORTED); /* shadowing 1GB pages not supported yet. */
+
+        PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
+        if (ppPdpt)
+            *ppPdpt = pPdpt;
+
+        uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pdpte.u & pVCpu->pgm.s.fGstEptShadowedPdpteMask;
+        const unsigned iPdPte    = (GCPhysNested >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
+        PEPTPDPTE      pPdpte    = &pPdpt->a[iPdPte];
+
+        if (!(pPdpte->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
+        {
+            RTGCPHYS const GCPhysPd = pGstWalkAll->u.Ept.Pdpte.u & EPT_PDPTE_PG_MASK;
+            rc = pgmPoolAlloc(pVM, GCPhysPd, PGMPOOLKIND_EPT_PD_FOR_EPT_PD, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
+                              pShwPage->idx, iPdPte, false /*fLockPage*/, &pShwPage);
+            AssertRCReturn(rc, rc);
+
+            /* Hook up the new PD now. */
+            ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
+        }
+        else
+        {
+            pShwPage = pgmPoolGetPage(pPool, pPdpte->u & EPT_PDPTE_PG_MASK);
+            AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
+
+            pgmPoolCacheUsed(pPool, pShwPage);
+
+            /* Hook up the cached PD if needed (probably not given there are 512 PTs we may need sync). */
+            if (pPdpte->u != (pShwPage->Core.Key | fShwFlags))
+                ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
+        }
+        Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
+        Log7Func(("GstPdpte=%RX64 ShwPdpte=%RX64 iPdPte=%u\n", pGstWalkAll->u.Ept.Pdpte.u, pPdpte->u, iPdPte));
+
+        *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
+    }
+
+    return VINF_SUCCESS;
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+
+
 #ifdef IN_RING0
 /**

@@ -1787 +1950 @@
     uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
     AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
-    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
+    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
     return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
 }

@@ -2135 +2298 @@
     Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));

-    RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
+    RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
     PPGMPAGE pPage;
     int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);

@@ -2176 +2339 @@
     PGM_LOCK_VOID(pVM);

-    RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
+    RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
     PPGMPAGE pPage;
-    /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
-     *        guest-physical address here. */
     int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
     if (RT_SUCCESS(rc))

@@ -2272 +2433 @@
     PGM_LOCK_VOID(pVM);

-    RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
+    RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
     PPGMPAGE pPage;
     int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);

@@ -2369 +2530 @@


-/**
- * Gets the CR3 mask corresponding to the given paging mode.
- *
- * @returns The CR3 mask.
- * @param   enmMode     The paging mode.
- */
-DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode)
-{
-    /** @todo This work can be optimized either by storing the masks in
-     *        pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and
-     *        store the result when entering guest mode since we currently use it only
-     *        for enmGuestMode. */
-    switch (enmMode)
-    {
-        case PGMMODE_PAE:
-        case PGMMODE_PAE_NX:
-            return X86_CR3_PAE_PAGE_MASK;
-        case PGMMODE_AMD64:
-        case PGMMODE_AMD64_NX:
-            return X86_CR3_AMD64_PAGE_MASK;
-        case PGMMODE_EPT:
-            return X86_CR3_EPT_PAGE_MASK;
-        default:
-            return X86_CR3_PAGE_MASK;
-    }
-}
-
-
-/**
- * Gets the masked CR3 value according to the current guest paging mode.
- *
- * @returns The masked PGM CR3 value.
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   uCr3    The raw guest CR3 value.
- */
-DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
-{
-    uint64_t const fCr3Mask  = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode);
-    RTGCPHYS       GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
-    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
-    return GCPhysCR3;
-}
-
-
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
 /**

@@ -2429 +2546 @@
 static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
 {
+# if 0
     if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
+# endif
     {
         PGMPTWALK Walk;

@@ -2449 +2568 @@
     }

+# if 0
     /*
      * If the nested-guest CR3 has not changed, then the previously

@@ -2455 +2575 @@
     *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
     return VINF_SUCCESS;
+# endif
 }
 #endif

@@ -2495 +2616 @@
         && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
     {
-        LogFlowFunc(("nested_cr3=%RX64 old=%RX64\n", GCPhysCR3, pVCpu->pgm.s.GCPhysNstGstCR3));
         RTGCPHYS GCPhysOut;
         int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);

@@ -2601 +2721 @@
     if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
     {
-        LogFlowFunc(("nested_cr3=%RX64 old_nested_cr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysNstGstCR3));
         RTGCPHYS GCPhysOut;
         int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);

@@ -2610 +2729 @@
             /* CR3 SLAT translation failed but we try to pretend it
                succeeded for the reasons mentioned in PGMHCChangeMode(). */
-            AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
+            Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
             int const rc2 = pgmGstUnmapCr3(pVCpu);
             pVCpu->pgm.s.GCPhysCR3       = NIL_RTGCPHYS;
             pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
             return rc2;
         }

@@ -2717 +2837 @@
            succeeded for the reasons mentioned in PGMHCChangeMode(). */
         AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
-        rc2 = pgmGstUnmapCr3(pVCpu);
         pVCpu->pgm.s.GCPhysCR3       = NIL_RTGCPHYS;
         pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;

@@ -2811 +2930 @@
  * PDPE entries. Here we assume the caller has already validated or doesn't require
  * validation of the PDPEs.
+ *
+ * In the case of nested EPT (i.e. for nested-guests), the PAE PDPEs have been
+ * validated by the VMX transition.
  *
  * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".

@@ -2844 +2966 @@
     }

+    /*
+     * Update CPUM with the PAE PDPEs.
+     */
+    CPUMSetGuestPaePdpes(pVCpu, paPaePdpes);
     return VINF_SUCCESS;
 }

@@ -2868 +2994 @@

 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-    if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
+    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
     {
         RTGCPHYS GCPhysOut;

@@ -2908 +3034 @@

     /*
-     * Update CPUM.
-     * We do this prior to mapping the PDPEs to keep the order consistent
-     * with what's used in HM. In practice, it doesn't really matter.
-     */
-    CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
-
-    /*
-     * Map the PDPEs.
+     * Map the PDPEs and update CPUM.
      */
     rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);

@@ -3320 +3439 @@

     /*
+     * Determine SLAT mode -before- entering the new shadow mode!
+     */
+    pVCpu->pgm.s.enmGuestSlatMode = !CPUMIsGuestVmxEptPagingEnabled(pVCpu) ? PGMSLAT_DIRECT : PGMSLAT_EPT;
+
+    /*
      * Enter new shadow mode (if changed).
      */

@@ -3380 +3504 @@
      *   - Indicate that the CR3 is nested-guest physical address.
      */
-    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
+    if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
     {
         if (PGMMODE_WITH_PAGING(enmGuestMode))

@@ -3405 +3529 @@
                  * See Intel spec. 27.2.1 "EPT Overview".
                  */
-                AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
+                Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));

                 /* Trying to coax PGM to succeed for the time being... */
                 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
                 pVCpu->pgm.s.GCPhysNstGstCR3  = GCPhysCR3;
-                pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
                 pVCpu->pgm.s.enmGuestMode     = enmGuestMode;
                 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);

@@ -3416 +3539 @@
             }
             pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
-            GCPhysCR3 = Walk.GCPhys;
-        }
-        pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
+            GCPhysCR3 = Walk.GCPhys & X86_CR3_EPT_PAGE_MASK;
+        }
     }
     else
-    {
         Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
-        pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
-    }
 #endif

@@ -3953 +4072 @@
     PGM_LOCK_VOID(pVM);
     pVCpu->pgm.s.uEptPtr = uEptPtr;
+    pVCpu->pgm.s.pGstEptPml4R3 = 0;
+    pVCpu->pgm.s.pGstEptPml4R0 = 0;
     PGM_UNLOCK(pVM);
 }
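The heart of this change is that the CR3 address mask is now derived from both the guest paging mode and the second-level address translation (SLAT) mode, and that a dedicated helper walks and, where missing, allocates the shadow EPT PML4 and PDPT entries. Below is a minimal, self-contained C sketch of those two ideas; it is not VirtualBox source, and the enum, constant, and function names are local stand-ins for PGMMODE/PGMSLAT, the X86_CR3_*_PAGE_MASK constants, and pgmGetCr3MaskForMode()/pgmGetGuestMaskedCr3() from the changeset.

    /* cr3_mask_sketch.c - illustration only, not VirtualBox code.
     * Shows the idea behind pgmGetCr3MaskForMode()/pgmGetGuestMaskedCr3(): the CR3
     * address mask depends on the paging mode, and with EPT SLAT the EPT mask is
     * used regardless of the guest paging mode.  Mask values follow the
     * architectural CR3/EPT pointer layouts; all names here are stand-ins. */
    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    typedef enum { MODE_32BIT, MODE_PAE, MODE_AMD64 } PagingMode;   /* stand-in for PGMMODE */
    typedef enum { SLAT_DIRECT, SLAT_EPT }            SlatMode;     /* stand-in for PGMSLAT */

    #define CR3_PAGE_MASK        UINT64_C(0x00000000fffff000)  /* 32-bit paging: bits 31:12 */
    #define CR3_PAE_PAGE_MASK    UINT64_C(0x00000000ffffffe0)  /* PAE: PDPT is 32-byte aligned */
    #define CR3_AMD64_PAGE_MASK  UINT64_C(0x000ffffffffff000)  /* long mode: bits 51:12 */
    #define CR3_EPT_PAGE_MASK    UINT64_C(0x000ffffffffff000)  /* EPT: nested-guest physical, bits 51:12 */

    static uint64_t cr3MaskForMode(PagingMode enmMode, SlatMode enmSlatMode)
    {
        if (enmSlatMode == SLAT_EPT)        /* nested guest: CR3 holds a nested-guest physical address */
            return CR3_EPT_PAGE_MASK;
        switch (enmMode)
        {
            case MODE_PAE:   return CR3_PAE_PAGE_MASK;
            case MODE_AMD64: return CR3_AMD64_PAGE_MASK;
            default:         return CR3_PAGE_MASK;
        }
    }

    static uint64_t maskedCr3(uint64_t uCr3, PagingMode enmMode, SlatMode enmSlatMode)
    {
        /* Strip the PWT/PCD/PCID-style low bits and any bits above the address width. */
        return uCr3 & cr3MaskForMode(enmMode, enmSlatMode);
    }

    /* EPT table indices as used when walking/allocating the shadow PML4E and PDPTE
     * (cf. pgmShwGetNestedEPTPDPtr): bits 47:39 index the PML4, bits 38:30 the PDPT. */
    static unsigned eptPml4Index(uint64_t GCPhysNested) { return (unsigned)((GCPhysNested >> 39) & 0x1ff); }
    static unsigned eptPdptIndex(uint64_t GCPhysNested) { return (unsigned)((GCPhysNested >> 30) & 0x1ff); }

    int main(void)
    {
        uint64_t const uCr3 = UINT64_C(0x000000012345f018);   /* low bits carry cache/PCID flags */
        printf("32-bit: %#018" PRIx64 "\n", maskedCr3(uCr3, MODE_32BIT, SLAT_DIRECT));
        printf("PAE:    %#018" PRIx64 "\n", maskedCr3(uCr3, MODE_PAE,   SLAT_DIRECT));
        printf("AMD64:  %#018" PRIx64 "\n", maskedCr3(uCr3, MODE_AMD64, SLAT_DIRECT));
        printf("EPT:    %#018" PRIx64 "\n", maskedCr3(uCr3, MODE_AMD64, SLAT_EPT));

        uint64_t const GCPhysNested = UINT64_C(0x0000008123456000);
        printf("PML4 index: %u, PDPT index: %u\n", eptPml4Index(GCPhysNested), eptPdptIndex(GCPhysNested));
        return 0;
    }

Built standalone (e.g. cc -std=c99 cr3_mask_sketch.c), the sketch prints the masked CR3 for each mode and the PML4/PDPT indices; in the changeset the equivalent masking is done once by pgmGetGuestMaskedCr3() and consumed by the 32-bit, PAE and AMD64 CR3-mapping paths shown in the hunks above.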