Changeset 96879 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Sep 26, 2022 5:43:43 PM (2 years ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r96407 r96879 66 66 static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk, 67 67 PPGMPTWALKGST pGstWalk); 68 static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk); 68 static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, 69 PPGMPTWALKGST pGstWalk); 69 70 static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3); 71 static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD, 72 PPGMPTWALKGST pGstWalkAll); 70 73 #endif 71 74 static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD); … … 74 77 75 78 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 76 /* Guest - EPT SLAT is identical for all guest paging mode. */77 79 # define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT 78 # define PGM_GST_TYPE PGM_TYPE_EPT 79 # include "PGMGstDefs.h" 80 # include "PGMSlatDefs.h" 80 81 # include "PGMAllGstSlatEpt.cpp.h" 81 # undef PGM_ GST_TYPE82 # undef PGM_SLAT_TYPE 82 83 #endif 83 84 … … 899 900 #undef PGMMODEDATABTH_NULL_ENTRY 900 901 }; 902 903 904 /** 905 * Gets the CR3 mask corresponding to the given paging mode. 906 * 907 * @returns The CR3 mask. 908 * @param enmMode The paging mode. 909 * @param enmSlatMode The second-level address translation mode. 910 */ 911 DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode, PGMSLAT enmSlatMode) 912 { 913 /** @todo This work can be optimized either by storing the masks in 914 * pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and 915 * store the result when entering guest mode since we currently use it only 916 * for enmGuestMode. */ 917 if (enmSlatMode == PGMSLAT_DIRECT) 918 { 919 Assert(enmMode != PGMMODE_EPT); 920 switch (enmMode) 921 { 922 case PGMMODE_PAE: 923 case PGMMODE_PAE_NX: 924 return X86_CR3_PAE_PAGE_MASK; 925 case PGMMODE_AMD64: 926 case PGMMODE_AMD64_NX: 927 return X86_CR3_AMD64_PAGE_MASK; 928 default: 929 return X86_CR3_PAGE_MASK; 930 } 931 } 932 else 933 { 934 Assert(enmSlatMode == PGMSLAT_EPT); 935 return X86_CR3_EPT_PAGE_MASK; 936 } 937 } 938 939 940 /** 941 * Gets the masked CR3 value according to the current guest paging mode. 942 * 943 * @returns The masked PGM CR3 value. 944 * @param pVCpu The cross context virtual CPU structure. 945 * @param uCr3 The raw guest CR3 value. 946 */ 947 DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3) 948 { 949 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode, pVCpu->pgm.s.enmGuestSlatMode); 950 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask); 951 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3); 952 return GCPhysCR3; 953 } 901 954 902 955 … … 1670 1723 1671 1724 1725 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 1726 /** 1727 * Syncs the SHADOW nested-guest page directory pointer for the specified address. 1728 * Allocates backing pages in case the PDPT or PML4 entry is missing. 1729 * 1730 * @returns VBox status code. 1731 * @param pVCpu The cross context virtual CPU structure. 1732 * @param GCPhysNested The nested-guest physical address. 1733 * @param ppPdpt Where to store the PDPT. Optional, can be NULL. 1734 * @param ppPD Where to store the PD. Optional, can be NULL. 1735 * @param pGstWalkAll The guest walk info. 
1736 */ 1737 static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD, 1738 PPGMPTWALKGST pGstWalkAll) 1739 { 1740 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 1741 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 1742 int rc; 1743 1744 PPGMPOOLPAGE pShwPage; 1745 Assert(pVM->pgm.s.fNestedPaging); 1746 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT); 1747 PGM_LOCK_ASSERT_OWNER(pVM); 1748 1749 /* 1750 * PML4 level. 1751 */ 1752 { 1753 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); 1754 Assert(pPml4); 1755 1756 /* Allocate page directory pointer table if not present. */ 1757 { 1758 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pml4e.u & pVCpu->pgm.s.fGstEptShadowedPml4eMask; 1759 const unsigned iPml4e = (GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK; 1760 PEPTPML4E pPml4e = &pPml4->a[iPml4e]; 1761 1762 if (!(pPml4e->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK))) 1763 { 1764 RTGCPHYS const GCPhysPdpt = pGstWalkAll->u.Ept.Pml4e.u & EPT_PML4E_PG_MASK; 1765 rc = pgmPoolAlloc(pVM, GCPhysPdpt, PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT, PGMPOOLACCESS_DONTCARE, 1766 PGM_A20_IS_ENABLED(pVCpu), pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4e, false /*fLockPage*/, 1767 &pShwPage); 1768 AssertRCReturn(rc, rc); 1769 1770 /* Hook up the new PDPT now. */ 1771 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags); 1772 } 1773 else 1774 { 1775 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK); 1776 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED); 1777 1778 pgmPoolCacheUsed(pPool, pShwPage); 1779 1780 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */ 1781 if (pPml4e->u != (pShwPage->Core.Key | fShwFlags)) 1782 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags); 1783 } 1784 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage)); 1785 Log7Func(("GstPml4e=%RX64 ShwPml4e=%RX64 iPml4e=%u\n", pGstWalkAll->u.Ept.Pml4e.u, pPml4e->u, iPml4e)); 1786 } 1787 } 1788 1789 /* 1790 * PDPT level. 1791 */ 1792 { 1793 AssertReturn(!(pGstWalkAll->u.Ept.Pdpte.u & EPT_E_LEAF), VERR_NOT_SUPPORTED); /* shadowing 1GB pages not supported yet. */ 1794 1795 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage); 1796 if (ppPdpt) 1797 *ppPdpt = pPdpt; 1798 1799 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pdpte.u & pVCpu->pgm.s.fGstEptShadowedPdpteMask; 1800 const unsigned iPdPte = (GCPhysNested >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK; 1801 PEPTPDPTE pPdpte = &pPdpt->a[iPdPte]; 1802 1803 if (!(pPdpte->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK))) 1804 { 1805 RTGCPHYS const GCPhysPd = pGstWalkAll->u.Ept.Pdpte.u & EPT_PDPTE_PG_MASK; 1806 rc = pgmPoolAlloc(pVM, GCPhysPd, PGMPOOLKIND_EPT_PD_FOR_EPT_PD, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu), 1807 pShwPage->idx, iPdPte, false /*fLockPage*/, &pShwPage); 1808 AssertRCReturn(rc, rc); 1809 1810 /* Hook up the new PD now. */ 1811 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags); 1812 } 1813 else 1814 { 1815 pShwPage = pgmPoolGetPage(pPool, pPdpte->u & EPT_PDPTE_PG_MASK); 1816 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED); 1817 1818 pgmPoolCacheUsed(pPool, pShwPage); 1819 1820 /* Hook up the cached PD if needed (probably not given there are 512 PTs we may need sync). 
*/ 1821 if (pPdpte->u != (pShwPage->Core.Key | fShwFlags)) 1822 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags); 1823 } 1824 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage)); 1825 Log7Func(("GstPdpte=%RX64 ShwPdpte=%RX64 iPdPte=%u \n", pGstWalkAll->u.Ept.Pdpte.u, pPdpte->u, iPdPte)); 1826 1827 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage); 1828 } 1829 1830 return VINF_SUCCESS; 1831 } 1832 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */ 1833 1834 1672 1835 #ifdef IN_RING0 1673 1836 /** … … 1787 1950 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData; 1788 1951 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE); 1789 AssertReturn(g_aPgmBothModeData[idxBth].pfn MapCR3, VERR_PGM_MODE_IPE);1952 AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE); 1790 1953 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu); 1791 1954 } … … 2135 2298 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd)); 2136 2299 2137 RTGCPHYS GCPhysCR3 = p VCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;2300 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3); 2138 2301 PPGMPAGE pPage; 2139 2302 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage); … … 2176 2339 PGM_LOCK_VOID(pVM); 2177 2340 2178 RTGCPHYS GCPhysCR3 = p VCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;2341 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3); 2179 2342 PPGMPAGE pPage; 2180 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to2181 * guest-physical address here. */2182 2343 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage); 2183 2344 if (RT_SUCCESS(rc)) … … 2272 2433 PGM_LOCK_VOID(pVM); 2273 2434 2274 RTGCPHYS GCPhysCR3 = p VCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;2435 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3); 2275 2436 PPGMPAGE pPage; 2276 2437 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage); … … 2369 2530 2370 2531 2371 /**2372 * Gets the CR3 mask corresponding to the given paging mode.2373 *2374 * @returns The CR3 mask.2375 * @param enmMode The paging mode.2376 */2377 DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode)2378 {2379 /** @todo This work can be optimized either by storing the masks in2380 * pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and2381 * store the result when entering guest mode since we currently use it only2382 * for enmGuestMode. 
*/2383 switch (enmMode)2384 {2385 case PGMMODE_PAE:2386 case PGMMODE_PAE_NX:2387 return X86_CR3_PAE_PAGE_MASK;2388 case PGMMODE_AMD64:2389 case PGMMODE_AMD64_NX:2390 return X86_CR3_AMD64_PAGE_MASK;2391 case PGMMODE_EPT:2392 return X86_CR3_EPT_PAGE_MASK;2393 default:2394 return X86_CR3_PAGE_MASK;2395 }2396 }2397 2398 2399 /**2400 * Gets the masked CR3 value according to the current guest paging mode.2401 *2402 * @returns The masked PGM CR3 value.2403 * @param pVCpu The cross context virtual CPU structure.2404 * @param uCr3 The raw guest CR3 value.2405 */2406 DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)2407 {2408 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode);2409 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);2410 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);2411 return GCPhysCR3;2412 }2413 2414 2415 2532 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 2416 2533 /** … … 2429 2546 static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3) 2430 2547 { 2548 # if 0 2431 2549 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3) 2550 # endif 2432 2551 { 2433 2552 PGMPTWALK Walk; … … 2449 2568 } 2450 2569 2570 # if 0 2451 2571 /* 2452 2572 * If the nested-guest CR3 has not changed, then the previously … … 2455 2575 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3; 2456 2576 return VINF_SUCCESS; 2577 # endif 2457 2578 } 2458 2579 #endif … … 2495 2616 && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode)) 2496 2617 { 2497 LogFlowFunc(("nested_cr3=%RX64 old=%RX64\n", GCPhysCR3, pVCpu->pgm.s.GCPhysNstGstCR3));2498 2618 RTGCPHYS GCPhysOut; 2499 2619 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut); … … 2601 2721 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu)) 2602 2722 { 2603 LogFlowFunc(("nested_cr3=%RX64 old_nested_cr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysNstGstCR3));2604 2723 RTGCPHYS GCPhysOut; 2605 2724 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut); … … 2610 2729 /* CR3 SLAT translation failed but we try to pretend it 2611 2730 succeeded for the reasons mentioned in PGMHCChangeMode(). */ 2612 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));2731 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc)); 2613 2732 int const rc2 = pgmGstUnmapCr3(pVCpu); 2614 2733 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS; 2615 2734 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS; 2735 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3); 2616 2736 return rc2; 2617 2737 } … … 2717 2837 succeeded for the reasons mentioned in PGMHCChangeMode(). */ 2718 2838 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2)); 2719 rc2 = pgmGstUnmapCr3(pVCpu);2720 2839 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS; 2721 2840 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS; … … 2811 2930 * PDPE entries. Here we assume the caller has already validated or doesn't require 2812 2931 * validation of the PDPEs. 2932 * 2933 * In the case of nested EPT (i.e. for nested-guests), the PAE PDPEs have been 2934 * validated by the VMX transition. 2813 2935 * 2814 2936 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode". … … 2844 2966 } 2845 2967 2968 /* 2969 * Update CPUM with the PAE PDPEs. 2970 */ 2971 CPUMSetGuestPaePdpes(pVCpu, paPaePdpes); 2846 2972 return VINF_SUCCESS; 2847 2973 } … … 2868 2994 2869 2995 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 2870 if (CPUMIsGuestVmxEptPa ePagingEnabled(pVCpu))2996 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu)) 2871 2997 { 2872 2998 RTGCPHYS GCPhysOut; … … 2908 3034 2909 3035 /* 2910 * Update CPUM. 
2911 * We do this prior to mapping the PDPEs to keep the order consistent 2912 * with what's used in HM. In practice, it doesn't really matter. 2913 */ 2914 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]); 2915 2916 /* 2917 * Map the PDPEs. 3036 * Map the PDPEs and update CPUM. 2918 3037 */ 2919 3038 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]); … … 3320 3439 3321 3440 /* 3441 * Determine SLAT mode -before- entering the new shadow mode! 3442 */ 3443 pVCpu->pgm.s.enmGuestSlatMode = !CPUMIsGuestVmxEptPagingEnabled(pVCpu) ? PGMSLAT_DIRECT : PGMSLAT_EPT; 3444 3445 /* 3322 3446 * Enter new shadow mode (if changed). 3323 3447 */ … … 3380 3504 * - Indicate that the CR3 is nested-guest physical address. 3381 3505 */ 3382 if ( CPUMIsGuestVmxEptPagingEnabled(pVCpu))3506 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT) 3383 3507 { 3384 3508 if (PGMMODE_WITH_PAGING(enmGuestMode)) … … 3405 3529 * See Intel spec. 27.2.1 "EPT Overview". 3406 3530 */ 3407 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));3531 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc)); 3408 3532 3409 3533 /* Trying to coax PGM to succeed for the time being... */ 3410 3534 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS); 3411 3535 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3; 3412 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;3413 3536 pVCpu->pgm.s.enmGuestMode = enmGuestMode; 3414 3537 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode); … … 3416 3539 } 3417 3540 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3; 3418 GCPhysCR3 = Walk.GCPhys; 3419 } 3420 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT; 3541 GCPhysCR3 = Walk.GCPhys & X86_CR3_EPT_PAGE_MASK; 3542 } 3421 3543 } 3422 3544 else 3423 {3424 3545 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS); 3425 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;3426 }3427 3546 #endif 3428 3547 … … 3953 4072 PGM_LOCK_VOID(pVM); 3954 4073 pVCpu->pgm.s.uEptPtr = uEptPtr; 4074 pVCpu->pgm.s.pGstEptPml4R3 = 0; 4075 pVCpu->pgm.s.pGstEptPml4R0 = 0; 3955 4076 PGM_UNLOCK(pVM); 3956 4077 } -
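
Note on the new CR3 masking helpers added above: pgmGetCr3MaskForMode() and pgmGetGuestMaskedCr3() replace the open-coded `& X86_CR3_*_PAGE_MASK` expressions in the pgmGstMapCr3 paths, and they let the EPT mask take precedence whenever second-level address translation is active. Below is a minimal standalone sketch of that selection logic; every SKETCH_-prefixed enum, constant and function is a stand-in introduced here for illustration, and the mask values are placeholders rather than the real VBox definitions.

```c
#include <stdint.h>

/* Stand-ins for the VBox PGMMODE/PGMSLAT enums and X86_CR3_*_PAGE_MASK
 * constants; the values are placeholders for illustration only. */
typedef enum { SKETCH_MODE_32BIT, SKETCH_MODE_PAE, SKETCH_MODE_AMD64 } SKETCHMODE;
typedef enum { SKETCH_SLAT_DIRECT, SKETCH_SLAT_EPT } SKETCHSLAT;

#define SKETCH_CR3_PAGE_MASK        UINT64_C(0x00000000fffff000)
#define SKETCH_CR3_PAE_PAGE_MASK    UINT64_C(0x00000000ffffffe0)
#define SKETCH_CR3_AMD64_PAGE_MASK  UINT64_C(0x000ffffffffff000)
#define SKETCH_CR3_EPT_PAGE_MASK    UINT64_C(0x000ffffffffff000)

/* Mirrors the shape of pgmGetCr3MaskForMode(): when EPT is the SLAT mode the
 * EPT mask wins outright, otherwise the guest paging mode picks the mask. */
static uint64_t sketchGetCr3Mask(SKETCHMODE enmMode, SKETCHSLAT enmSlat)
{
    if (enmSlat == SKETCH_SLAT_EPT)
        return SKETCH_CR3_EPT_PAGE_MASK;
    switch (enmMode)
    {
        case SKETCH_MODE_PAE:   return SKETCH_CR3_PAE_PAGE_MASK;
        case SKETCH_MODE_AMD64: return SKETCH_CR3_AMD64_PAGE_MASK;
        default:                return SKETCH_CR3_PAGE_MASK;
    }
}

/* Counterpart of pgmGetGuestMaskedCr3(), minus the A20 gate handling that
 * the real helper applies via PGM_A20_APPLY_TO_VAR(). */
static uint64_t sketchGetMaskedCr3(SKETCHMODE enmMode, SKETCHSLAT enmSlat, uint64_t uCr3)
{
    return uCr3 & sketchGetCr3Mask(enmMode, enmSlat);
}
```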
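
The new pgmShwGetNestedEPTPDPtr() indexes the shadow PML4 and PDPT with the usual 9-bits-per-level split of the nested-guest physical address. The sketch below shows only that address decomposition; the shift and mask constants follow the architectural 4-level EPT layout and are written out here rather than copied from the VBox headers.

```c
#include <stdint.h>

/* Standard 4-level EPT split: 9 bits per level, 4 KiB pages. */
#define SKETCH_EPT_PML4_SHIFT  39
#define SKETCH_EPT_PDPT_SHIFT  30
#define SKETCH_EPT_PD_SHIFT    21
#define SKETCH_EPT_PT_SHIFT    12
#define SKETCH_EPT_IDX_MASK    0x1ff    /* 512 entries per table */

typedef struct
{
    unsigned iPml4e;
    unsigned iPdpte;
    unsigned iPde;
    unsigned iPte;
} SKETCHEPTIDX;

/* Decomposes a nested-guest physical address the same way the new
 * pgmShwGetNestedEPTPDPtr()/NestedSyncPage() code does before walking
 * the shadow EPT tables. */
static SKETCHEPTIDX sketchEptIndexes(uint64_t GCPhysNested)
{
    SKETCHEPTIDX Idx;
    Idx.iPml4e = (unsigned)((GCPhysNested >> SKETCH_EPT_PML4_SHIFT) & SKETCH_EPT_IDX_MASK);
    Idx.iPdpte = (unsigned)((GCPhysNested >> SKETCH_EPT_PDPT_SHIFT) & SKETCH_EPT_IDX_MASK);
    Idx.iPde   = (unsigned)((GCPhysNested >> SKETCH_EPT_PD_SHIFT)   & SKETCH_EPT_IDX_MASK);
    Idx.iPte   = (unsigned)((GCPhysNested >> SKETCH_EPT_PT_SHIFT)   & SKETCH_EPT_IDX_MASK);
    return Idx;
}
```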
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r96407 r96879 47 47 #ifndef IN_RING3 48 48 PGM_BTH_DECL(int, Trap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken); 49 PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested, 50 bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk, bool *pfLockTaken); 49 PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNestedFault, 50 bool fIsLinearAddrValid, RTGCPTR GCPtrNestedFault, PPGMPTWALK pWalk, bool *pfLockTaken); 51 # if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT 52 static void PGM_BTH_NAME(NestedSyncPageWorker)(PVMCPUCC pVCpu, PSHWPTE pPte, RTGCPHYS GCPhysPage, PPGMPOOLPAGE pShwPage, 53 unsigned iPte, PPGMPTWALKGST pGstWalkAll); 54 static int PGM_BTH_NAME(NestedSyncPage)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNestedPage, RTGCPHYS GCPhysPage, unsigned cPages, 55 uint32_t uErr, PPGMPTWALKGST pGstWalkAll); 56 static int PGM_BTH_NAME(NestedSyncPT)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNestedPage, RTGCPHYS GCPhysPage, PPGMPTWALKGST pGstWalkAll); 57 # endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */ 51 58 #endif 52 59 PGM_BTH_DECL(int, InvalidatePage)(PVMCPUCC pVCpu, RTGCPTR GCPtrPage); … … 983 990 984 991 992 # if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) 993 /** 994 * Deals with a nested-guest \#PF fault for a guest-physical page with a handler. 995 * 996 * @returns Strict VBox status code. 997 * @param pVCpu The cross context virtual CPU structure. 998 * @param uErr The error code. 999 * @param pRegFrame The register frame. 1000 * @param GCPhysNestedFault The nested-guest physical address of the fault. 1001 * @param pPage The guest page at @a GCPhysNestedFault. 1002 * @param GCPhysFault The guest-physical address of the fault. 1003 * @param pGstWalkAll The guest page walk result. 1004 * @param pfLockTaken Where to store whether the PGM is still held when 1005 * this function completes. 1006 * 1007 * @note The caller has taken the PGM lock. 1008 */ 1009 static VBOXSTRICTRC PGM_BTH_NAME(NestedTrap0eHandlerDoAccessHandlers)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, 1010 RTGCPHYS GCPhysNestedFault, PPGMPAGE pPage, 1011 RTGCPHYS GCPhysFault, PPGMPTWALKGST pGstWalkAll, 1012 bool *pfLockTaken) 1013 { 1014 # if PGM_GST_TYPE == PGM_TYPE_PROT \ 1015 && PGM_SHW_TYPE == PGM_TYPE_EPT 1016 1017 /** @todo Assert uErr isn't X86_TRAP_PF_RSVD and remove release checks. */ 1018 PGM_A20_ASSERT_MASKED(pVCpu, GCPhysFault); 1019 AssertMsgReturn(PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage), ("%RGp %RGp uErr=%u\n", GCPhysNestedFault, GCPhysFault, uErr), 1020 VERR_PGM_HANDLER_IPE_1); 1021 1022 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 1023 RTGCPHYS const GCPhysNestedPage = GCPhysNestedFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 1024 RTGCPHYS const GCPhysPage = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 1025 1026 /* 1027 * Physical page access handler. 1028 */ 1029 PPGMPHYSHANDLER pCur; 1030 VBOXSTRICTRC rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pCur); 1031 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict); 1032 1033 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur); 1034 Assert(pCurType); 1035 1036 /* 1037 * If the region is write protected and we got a page not present fault, then sync 1038 * the pages. If the fault was caused by a read, then restart the instruction. 1039 * In case of write access continue to the GC write handler. 
1040 */ 1041 if ( !(uErr & X86_TRAP_PF_P) 1042 && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE) 1043 { 1044 Log7Func(("Syncing Monitored: GCPhysNestedPage=%RGp GCPhysPage=%RGp uErr=%#x\n", GCPhysNestedPage, GCPhysPage, uErr)); 1045 rcStrict = PGM_BTH_NAME(NestedSyncPage)(pVCpu, GCPhysNestedPage, GCPhysPage, 1 /*cPages*/, uErr, pGstWalkAll); 1046 Assert(rcStrict != VINF_PGM_SYNCPAGE_MODIFIED_PDE); 1047 if ( RT_FAILURE(rcStrict) 1048 || !(uErr & X86_TRAP_PF_RW)) 1049 { 1050 AssertMsgRC(rcStrict, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 1051 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eHandlersOutOfSync); 1052 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2OutOfSyncHndPhys; }); 1053 return rcStrict; 1054 } 1055 } 1056 else if ( !(uErr & X86_TRAP_PF_RSVD) 1057 && pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE) 1058 { 1059 /* 1060 * If the access was NOT through an EPT misconfig (i.e. RSVD), sync the page. 1061 * This can happen for the VMX APIC-access page. 1062 */ 1063 Log7Func(("Syncing MMIO: GCPhysNestedPage=%RGp GCPhysPage=%RGp\n", GCPhysNestedPage, GCPhysPage)); 1064 rcStrict = PGM_BTH_NAME(NestedSyncPage)(pVCpu, GCPhysNestedPage, GCPhysPage, 1 /*cPages*/, uErr, pGstWalkAll); 1065 Assert(rcStrict != VINF_PGM_SYNCPAGE_MODIFIED_PDE); 1066 if (RT_FAILURE(rcStrict)) 1067 { 1068 AssertMsgRC(rcStrict, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 1069 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eHandlersOutOfSync); 1070 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2OutOfSyncHndPhys; }); 1071 return rcStrict; 1072 } 1073 } 1074 1075 AssertMsg( pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE 1076 || (pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE && (uErr & X86_TRAP_PF_RW)), 1077 ("Unexpected trap for physical handler: %08X (phys=%08x) pPage=%R[pgmpage] uErr=%X, enmKind=%d\n", 1078 GCPhysNestedFault, GCPhysFault, pPage, uErr, pCurType->enmKind)); 1079 if (pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE) 1080 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eHandlersPhysWrite); 1081 else 1082 { 1083 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eHandlersPhysAll); 1084 if (uErr & X86_TRAP_PF_RSVD) 1085 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eHandlersPhysAllOpt); 1086 } 1087 1088 if (pCurType->pfnPfHandler) 1089 { 1090 STAM_PROFILE_START(&pCur->Stat, h); 1091 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser 1092 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser); 1093 1094 if (pCurType->fKeepPgmLock) 1095 { 1096 rcStrict = pCurType->pfnPfHandler(pVM, pVCpu, uErr, pRegFrame, GCPhysNestedFault, GCPhysFault, uUser); 1097 STAM_PROFILE_STOP(&pCur->Stat, h); 1098 } 1099 else 1100 { 1101 PGM_UNLOCK(pVM); 1102 *pfLockTaken = false; 1103 rcStrict = pCurType->pfnPfHandler(pVM, pVCpu, uErr, pRegFrame, GCPhysNestedFault, GCPhysFault, uUser); 1104 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */ 1105 } 1106 } 1107 else 1108 { 1109 AssertMsgFailed(("What's going on here!? 
Fault falls outside handler range!?\n")); 1110 rcStrict = VINF_EM_RAW_EMULATE_INSTR; 1111 } 1112 1113 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2HndPhys; }); 1114 return rcStrict; 1115 1116 # else 1117 RT_NOREF8(pVCpu, uErr, pRegFrame, GCPhysNestedFault, pPage, GCPhysFault, pGstWalkAll, pfLockTaken); 1118 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE)); 1119 return VERR_PGM_NOT_USED_IN_MODE; 1120 # endif 1121 } 1122 # endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */ 1123 1124 985 1125 /** 986 1126 * Nested \#PF handler for nested-guest hardware-assisted execution using nested … … 991 1131 * @param uErr The fault error (X86_TRAP_PF_*). 992 1132 * @param pRegFrame The register frame. 993 * @param GCPhysNested The nested-guest physical address being accessed.1133 * @param GCPhysNestedFault The nested-guest physical address of the fault. 994 1134 * @param fIsLinearAddrValid Whether translation of a nested-guest linear address 995 * caused this fault. If @c false, GCPtrNested must be 996 * 0. 997 * @param GCPtrNested The nested-guest linear address that caused this 998 * fault. 1135 * caused this fault. If @c false, GCPtrNestedFault 1136 * must be 0. 1137 * @param GCPtrNestedFault The nested-guest linear address of this fault. 999 1138 * @param pWalk The guest page table walk result. 1000 1139 * @param pfLockTaken Where to store whether the PGM lock is still held 1001 1140 * when this function completes. 1002 1141 */ 1003 PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNested ,1004 bool fIsLinearAddrValid, RTGCPTR GCPtrNested , PPGMPTWALK pWalk, bool *pfLockTaken)1142 PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysNestedFault, 1143 bool fIsLinearAddrValid, RTGCPTR GCPtrNestedFault, PPGMPTWALK pWalk, bool *pfLockTaken) 1005 1144 { 1006 1145 *pfLockTaken = false; 1007 1146 # if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) \ 1008 && ( PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_32BIT \ 1009 || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \ 1147 && PGM_GST_TYPE == PGM_TYPE_PROT \ 1010 1148 && PGM_SHW_TYPE == PGM_TYPE_EPT 1011 1149 1012 1150 Assert(CPUMIsGuestVmxEptPagingEnabled(pVCpu)); 1151 Assert(PGM_A20_IS_ENABLED(pVCpu)); 1152 1153 /* We don't support mode-based execute control for EPT yet. */ 1154 Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt); 1155 Assert(!(uErr & X86_TRAP_PF_US)); 1156 1157 /* Take the big lock now. */ 1158 *pfLockTaken = true; 1159 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 1160 PGM_LOCK_VOID(pVM); 1013 1161 1014 1162 /* 1015 1163 * Walk the guest EPT tables and check if it's an EPT violation or misconfiguration. 
1016 1164 */ 1165 Log7Func(("cs:rip=%04x:%#RX64 GCPhysNestedFault=%RGp\n", pRegFrame->cs.Sel, pRegFrame->rip, GCPhysNestedFault)); 1017 1166 PGMPTWALKGST GstWalkAll; 1018 int rc = pgmGstSlatWalk(pVCpu, GCPhysNested , fIsLinearAddrValid, GCPtrNested, pWalk, &GstWalkAll);1167 int rc = pgmGstSlatWalk(pVCpu, GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault, pWalk, &GstWalkAll); 1019 1168 if (RT_FAILURE(rc)) 1020 1169 return rc; … … 1022 1171 Assert(GstWalkAll.enmType == PGMPTWALKGSTTYPE_EPT); 1023 1172 Assert(pWalk->fSucceeded); 1024 Assert(pWalk->fEffective & PGM_PTATTRS_R_MASK);1173 Assert(pWalk->fEffective & (PGM_PTATTRS_EPT_R_MASK | PGM_PTATTRS_EPT_W_MASK | PGM_PTATTRS_EPT_X_SUPER_MASK)); 1025 1174 Assert(pWalk->fIsSlat); 1026 1175 1027 if (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID)) 1028 { 1029 if ( ( (uErr & X86_TRAP_PF_RW) 1030 && !(pWalk->fEffective & PGM_PTATTRS_W_MASK) 1031 && ( (uErr & X86_TRAP_PF_US) 1032 || CPUMIsGuestR0WriteProtEnabled(pVCpu)) ) 1033 || ((uErr & X86_TRAP_PF_US) && !(pWalk->fEffective & PGM_PTATTRS_US_MASK)) 1034 || ((uErr & X86_TRAP_PF_ID) && (pWalk->fEffective & PGM_PTATTRS_NX_MASK)) 1035 ) 1036 return VERR_ACCESS_DENIED; 1037 } 1038 1039 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 1040 RTGCPHYS const GCPhysFault = PGM_A20_APPLY(pVCpu, GCPhysNested & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK); 1041 GSTPDE const PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A }; 1042 1043 /* Take the big lock now. */ 1044 *pfLockTaken = true; 1045 PGM_LOCK_VOID(pVM); 1046 1047 /* 1048 * Check if this is an APIC-access page access (VMX specific). 1049 */ 1050 RTGCPHYS const GCPhysApicAccess = CPUMGetGuestVmxApicAccessPageAddr(pVCpu); 1051 if ((pWalk->GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) == GCPhysApicAccess) 1052 { 1053 PPGMPAGE pPage; 1054 rc = pgmPhysGetPageEx(pVM, PGM_A20_APPLY(pVCpu, GCPhysApicAccess), &pPage); 1055 if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 1056 { 1057 rc = VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pWalk->GCPhys, pPage, 1058 pfLockTaken)); 1059 return rc; 1060 } 1061 } 1062 1063 # ifdef PGM_WITH_MMIO_OPTIMIZATIONS 1064 /* 1065 * Check if this is an MMIO access. 1066 */ 1067 if (uErr & X86_TRAP_PF_RSVD) 1068 { 1069 PPGMPAGE pPage; 1070 rc = pgmPhysGetPageEx(pVM, PGM_A20_APPLY(pVCpu, (RTGCPHYS)GCPhysFault), &pPage); 1071 if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 1072 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, GCPhysFault, pPage, 1073 pfLockTaken)); 1074 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, GCPhysFault, 1, uErr); 1075 AssertRC(rc); 1076 HMInvalidatePhysPage(pVM, GCPhysFault); 1077 return rc; /* Restart with the corrected entry. */ 1078 } 1079 # endif /* PGM_WITH_MMIO_OPTIMIZATIONS */ 1080 1081 /* 1082 * Fetch the guest EPT page directory pointer. 1083 */ 1084 const unsigned iPDDst = ((GCPhysFault >> SHW_PD_SHIFT) & SHW_PD_MASK); 1085 PEPTPD pPDDst; 1086 rc = pgmShwGetEPTPDPtr(pVCpu, GCPhysFault, NULL /* ppPdpt */, &pPDDst); 1087 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS); 1088 Assert(pPDDst); 1176 /* Paranoia: Remove later. 
*/ 1177 Assert(RT_BOOL(pWalk->fEffective & PGM_PTATTRS_R_MASK) == RT_BOOL(pWalk->fEffective & PGM_PTATTRS_EPT_R_MASK)); 1178 Assert(RT_BOOL(pWalk->fEffective & PGM_PTATTRS_W_MASK) == RT_BOOL(pWalk->fEffective & PGM_PTATTRS_EPT_W_MASK)); 1179 Assert(RT_BOOL(pWalk->fEffective & PGM_PTATTRS_NX_MASK) == !RT_BOOL(pWalk->fEffective & PGM_PTATTRS_EPT_X_SUPER_MASK)); 1180 1181 /* 1182 * Check page-access permissions. 1183 */ 1184 if ( ((uErr & X86_TRAP_PF_RW) && !(pWalk->fEffective & PGM_PTATTRS_W_MASK)) 1185 || ((uErr & X86_TRAP_PF_ID) && (pWalk->fEffective & PGM_PTATTRS_NX_MASK))) 1186 { 1187 Log7Func(("Permission failed! GCPtrNested=%RGv GCPhysNested=%RGp uErr=%#x fEffective=%#RX64\n", GCPtrNestedFault, 1188 GCPhysNestedFault, uErr, pWalk->fEffective)); 1189 pWalk->fFailed = PGM_WALKFAIL_EPT_VIOLATION; 1190 return VERR_ACCESS_DENIED; 1191 } 1192 1193 PGM_A20_ASSERT_MASKED(pVCpu, pWalk->GCPhys); 1194 RTGCPHYS const GCPhysPage = pWalk->GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 1195 RTGCPHYS const GCPhysNestedPage = GCPhysNestedFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 1196 1197 /* 1198 * If we were called via an EPT misconfig, it should've already resulted in a nested-guest VM-exit. 1199 */ 1200 AssertMsgReturn(!(uErr & X86_TRAP_PF_RSVD), 1201 ("Unexpected EPT misconfig VM-exit. GCPhysPage=%RGp GCPhysNestedPage=%RGp\n", GCPhysPage, GCPhysNestedPage), 1202 VERR_PGM_MAPPING_IPE); 1203 1204 /* 1205 * Fetch and sync the nested-guest EPT page directory pointer. 1206 */ 1207 PEPTPD pEptPd; 1208 rc = pgmShwGetNestedEPTPDPtr(pVCpu, GCPhysNestedPage, NULL /*ppPdpt*/, &pEptPd, &GstWalkAll); 1209 AssertRCReturn(rc, rc); 1210 Assert(pEptPd); 1089 1211 1090 1212 /* … … 1093 1215 * It is IMPORTANT that we weed out any access to non-present shadow PDEs 1094 1216 * here so we can safely assume that the shadow PT is present when calling 1095 * SyncPage later.1217 * NestedSyncPage later. 1096 1218 * 1097 * On failure, we ASSUME that SyncPT is out of memory or detected some kind 1098 * of mapping conflict and defer to SyncCR3 in R3. 1099 * (Again, we do NOT support access handlers for non-present guest pages.) 1100 * 1101 */ 1102 if ( !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */ 1103 && !SHW_PDE_IS_P(pPDDst->a[iPDDst])) 1219 * NOTE: It's possible we will be syncing the VMX APIC-access page here. 1220 * In that case, we would sync the page but will NOT go ahead with emulating 1221 * the APIC-access VM-exit through IEM. However, once the page is mapped in 1222 * the shadow tables, subsequent APIC-access VM-exits for the nested-guest 1223 * will be triggered by hardware. Maybe calling the IEM #PF handler can be 1224 * considered as an optimization later. 1225 */ 1226 unsigned const iPde = (GCPhysNestedPage >> SHW_PD_SHIFT) & SHW_PD_MASK; 1227 if ( !(uErr & X86_TRAP_PF_P) 1228 && !(pEptPd->a[iPde].u & EPT_PRESENT_MASK)) 1104 1229 { 1105 1230 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2SyncPT; }); 1106 Log Flow(("=>SyncPT GCPhysFault=%RGp\n", GCPhysFault));1107 rc = PGM_BTH_NAME( SyncPT)(pVCpu, 0 /* iPDSrc */, NULL /* pPDSrc */, GCPhysFault);1231 Log7Func(("NestedSyncPT: Lazy. GCPhysNestedPage=%RGp GCPhysPage=%RGp\n", GCPhysNestedPage, GCPhysPage)); 1232 rc = PGM_BTH_NAME(NestedSyncPT)(pVCpu, GCPhysNestedPage, GCPhysPage, &GstWalkAll); 1108 1233 if (RT_SUCCESS(rc)) 1109 1234 return rc; 1110 Log(("SyncPT: %RGp failed!! 
rc=%Rrc\n", GCPhysFault, rc)); 1111 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */ 1112 return VINF_PGM_SYNC_CR3; 1113 } 1114 1115 /* 1116 * Check if this fault address is flagged for special treatment, 1117 * which means we'll have to figure out the physical address and 1118 * check flags associated with it. 1235 AssertMsgFailedReturn(("NestedSyncPT: %RGv failed! rc=%Rrc\n", GCPhysNestedPage, rc), VERR_PGM_MAPPING_IPE); 1236 } 1237 1238 /* 1239 * Check if this fault address is flagged for special treatment. 1240 * This handles faults on an MMIO or write-monitored page. 1119 1241 * 1120 * ASSUME that we can limit any special access handling to pages 1121 * in page tables which the guest believes to be present. 1242 * If this happens to be the VMX APIC-access page, we sync it in the shadow tables 1243 * and emulate the APIC-access VM-exit by calling IEM's VMX APIC-access #PF handler 1244 * registered for the page. Once the page is mapped in the shadow tables, subsequent 1245 * APIC-access VM-exits for the nested-guest will be triggered by hardware. 1122 1246 */ 1123 1247 PPGMPAGE pPage; 1124 rc = pgmPhysGetPageEx(pVM, GCPhysFault, &pPage); 1125 if (RT_FAILURE(rc)) 1126 { 1127 /* 1128 * When the guest accesses invalid physical memory (e.g. probing 1129 * of RAM or accessing a remapped MMIO range), then we'll fall 1130 * back to the recompiler to emulate the instruction. 1131 */ 1132 LogFlow(("PGM #PF: pgmPhysGetPageEx(%RGp) failed with %Rrc\n", GCPhysFault, rc)); 1133 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eHandlersInvalid); 1134 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2InvalidPhys; }); 1135 return VINF_EM_RAW_EMULATE_INSTR; 1136 } 1137 1138 /* 1139 * Any handlers for this page? 1140 */ 1248 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage); 1249 AssertRCReturn(rc, rc); 1141 1250 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 1142 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, GCPhysFault, pPage, 1143 pfLockTaken)); 1144 1145 /* 1146 * We are here only if page is present in Guest page tables and 1147 * trap is not handled by our handlers. 1148 * 1149 * Check it for page out-of-sync situation. 1251 { 1252 Log7Func(("MMIO: Calling NestedTrap0eHandlerDoAccessHandlers for GCPhys %RGp\n", GCPhysPage)); 1253 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(NestedTrap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, GCPhysNestedFault, 1254 pPage, pWalk->GCPhys, &GstWalkAll, 1255 pfLockTaken)); 1256 } 1257 1258 /* 1259 * We are here only if page is present in nested-guest page tables but the 1260 * trap is not handled by our handlers. Check for page out-of-sync situation. 1150 1261 */ 1151 1262 if (!(uErr & X86_TRAP_PF_P)) 1152 1263 { 1153 /* 1154 * Page is not present in our page tables. Try to sync it! 1155 */ 1156 if (uErr & X86_TRAP_PF_US) 1157 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncUser)); 1158 else /* supervisor */ 1159 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisor)); 1160 1161 if (PGM_PAGE_IS_BALLOONED(pPage)) 1162 { 1163 /* Emulate reads from ballooned pages as they are not present in 1164 our shadow page tables. (Required for e.g. Solaris guests; soft 1165 ecc, random nr generator.) 
*/ 1166 rc = VBOXSTRICTRC_TODO(PGMInterpretInstruction(pVM, pVCpu, pRegFrame, GCPhysFault)); 1167 LogFlow(("PGM: PGMInterpretInstruction balloon -> rc=%d pPage=%R[pgmpage]\n", rc, pPage)); 1168 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncBallloon)); 1169 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Ballooned; }); 1170 return rc; 1171 } 1172 1173 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, GCPhysFault, PGM_SYNC_NR_PAGES, uErr); 1264 Assert(!PGM_PAGE_IS_BALLOONED(pPage)); 1265 Assert(!(uErr & X86_TRAP_PF_US)); /* Mode-based execute not supported yet. */ 1266 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisor)); 1267 1268 Log7Func(("SyncPage: Not-Present: GCPhysNestedPage=%RGp GCPhysPage=%RGp\n", GCPhysNestedFault, GCPhysPage)); 1269 rc = PGM_BTH_NAME(NestedSyncPage)(pVCpu, GCPhysNestedPage, GCPhysPage, PGM_SYNC_NR_PAGES, uErr, &GstWalkAll); 1174 1270 if (RT_SUCCESS(rc)) 1175 1271 { 1176 /* The page was successfully synced, return to the guest. */1177 1272 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2OutOfSync; }); 1178 1273 return VINF_SUCCESS; 1179 1274 } 1180 1275 } 1181 else 1276 else if (uErr & X86_TRAP_PF_RW) 1182 1277 { 1183 1278 /* 1184 1279 * Write protected pages are made writable when the guest makes the 1185 * first write to it. 1280 * first write to it. This happens for pages that are shared, write 1186 1281 * monitored or not yet allocated. 1187 1282 * … … 1192 1287 * Assume for now it only applies to the read/write flag. 1193 1288 */ 1194 if (uErr & X86_TRAP_PF_RW) 1195 { 1196 /* 1197 * Check if it is a read-only page. 1198 */ 1199 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED) 1289 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED) 1290 { 1291 /* This is a read-only page. */ 1292 AssertMsgFailed(("Failed\n")); 1293 1294 Assert(!PGM_PAGE_IS_ZERO(pPage)); 1295 AssertFatalMsg(!PGM_PAGE_IS_BALLOONED(pPage), ("Unexpected ballooned page at %RGp\n", GCPhysPage)); 1296 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2MakeWritable; }); 1297 1298 Log7Func(("Calling pgmPhysPageMakeWritable for GCPhysPage=%RGp\n", GCPhysPage)); 1299 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhysPage); 1300 if (rc != VINF_SUCCESS) 1200 1301 { 1201 Assert(!PGM_PAGE_IS_ZERO(pPage)); 1202 AssertFatalMsg(!PGM_PAGE_IS_BALLOONED(pPage), ("Unexpected ballooned page at %RGp\n", GCPhysFault)); 1203 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2MakeWritable; }); 1204 1205 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhysFault); 1206 if (rc != VINF_SUCCESS) 1207 { 1208 AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc)); 1209 return rc; 1210 } 1211 if (RT_UNLIKELY(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))) 1212 return VINF_EM_NO_MEMORY; 1302 AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc)); 1303 return rc; 1213 1304 } 1214 1215 if (uErr & X86_TRAP_PF_US) 1216 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncUserWrite)); 1217 else 1218 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisorWrite)); 1219 1220 /* 1221 * Sync the page. 1222 * 1223 * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the 1224 * page is not present, which is not true in this case. 
1225 */ 1226 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, GCPhysFault, 1, uErr); 1227 if (RT_SUCCESS(rc)) 1228 { 1229 /* 1230 * Page was successfully synced, return to guest but invalidate 1231 * the TLB first as the page is very likely to be in it. 1232 */ 1233 HMInvalidatePhysPage(pVM, GCPhysFault); 1234 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2OutOfSyncHndObs; }); 1235 return VINF_SUCCESS; 1236 } 1237 } 1238 } 1239 1240 /* 1241 * If we get here it is because something failed above, i.e. most like guru meditation time. 1242 */ 1243 LogRelFunc(("returns rc=%Rrc GCPhysFault=%RGp uErr=%RX64 cs:rip=%04x:%08RX64\n", rc, GCPhysFault, (uint64_t)uErr, 1244 pRegFrame->cs.Sel, pRegFrame->rip)); 1245 return rc; 1305 if (RT_UNLIKELY(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))) 1306 return VINF_EM_NO_MEMORY; 1307 } 1308 1309 Assert(!(uErr & X86_TRAP_PF_US)); /* Mode-based execute not supported yet. */ 1310 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisorWrite)); 1311 1312 /* 1313 * Sync the write-protected page. 1314 * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the 1315 * page is not present, which is not true in this case. 1316 */ 1317 Log7Func(("SyncPage: RW: cs:rip=%04x:%#RX64 GCPhysNestedPage=%RGp uErr=%#RX32 GCPhysPage=%RGp WalkGCPhys=%RGp\n", 1318 pRegFrame->cs.Sel, pRegFrame->rip, GCPhysNestedPage, (uint32_t)uErr, GCPhysPage, pWalk->GCPhys)); 1319 rc = PGM_BTH_NAME(NestedSyncPage)(pVCpu, GCPhysNestedPage, GCPhysPage, 1 /* cPages */, uErr, &GstWalkAll); 1320 if (RT_SUCCESS(rc)) 1321 { 1322 HMInvalidatePhysPage(pVM, GCPhysPage); 1323 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2OutOfSyncHndObs; }); 1324 return VINF_SUCCESS; 1325 } 1326 } 1327 1328 /* 1329 * If we get here it is because something failed above => guru meditation time. 1330 */ 1331 LogRelFunc(("GCPhysNestedFault=%#RGp (%#RGp) uErr=%#RX32 cs:rip=%04x:%08RX64\n", rc, GCPhysNestedFault, GCPhysPage, 1332 (uint32_t)uErr, pRegFrame->cs.Sel, pRegFrame->rip)); 1333 return VERR_PGM_MAPPING_IPE; 1246 1334 1247 1335 # else 1248 RT_NOREF7(pVCpu, uErr, pRegFrame, GCPhysNested , fIsLinearAddrValid, GCPtrNested, pWalk);1336 RT_NOREF7(pVCpu, uErr, pRegFrame, GCPhysNestedFault, fIsLinearAddrValid, GCPtrNestedFault, pWalk); 1249 1337 AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE)); 1250 1338 return VERR_PGM_NOT_USED_IN_MODE; … … 1658 1746 * 1659 1747 * @param pVM The cross context VM structure. 1748 * @param pVCpu The cross context virtual CPU structure. 1660 1749 * @param pPage The page in question. 1750 * @param GCPhysPage The guest-physical address of the page. 1661 1751 * @param fPteSrc The shadowed flags of the source PTE. Must include the 1662 1752 * A (accessed) bit so it can be emulated correctly. … … 1664 1754 * does not need to be set atomically. 1665 1755 */ 1666 DECLINLINE(void) PGM_BTH_NAME(SyncHandlerPte)(PVMCC pVM, PCPGMPAGE pPage, uint64_t fPteSrc, PSHWPTE pPteDst) 1756 DECLINLINE(void) PGM_BTH_NAME(SyncHandlerPte)(PVMCC pVM, PVMCPUCC pVCpu, PCPGMPAGE pPage, RTGCPHYS GCPhysPage, uint64_t fPteSrc, 1757 PSHWPTE pPteDst) 1667 1758 { 1668 NOREF(pVM); RT_NOREF_PV(fPteSrc);1759 RT_NOREF_PV(pVM); RT_NOREF_PV(fPteSrc); RT_NOREF_PV(pVCpu); RT_NOREF_PV(GCPhysPage); 1669 1760 1670 1761 /** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. 
… … 1695 1786 ) 1696 1787 { 1788 # if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && PGM_SHW_TYPE == PGM_TYPE_EPT 1789 /* 1790 * If an "ALL" access handler has been registered for the VMX APIC-access page, 1791 * we want to ensure EPT violations are triggered rather than EPT misconfigs 1792 * as the former allows us to translate it to an APIC-access VM-exit. This is a 1793 * weird case because this is not an MMIO page (it's regular guest RAM) but we 1794 * want to treat it as an MMIO page wrt to trapping all accesses but we only 1795 * want EPT violations for the reasons state above. 1796 * 1797 * NOTE! This is required even when the nested-hypervisor is not using EPT! 1798 */ 1799 if (CPUMIsGuestVmxApicAccessPageAddr(pVCpu, GCPhysPage)) 1800 { 1801 Log7Func(("SyncHandlerPte: VMX APIC-access page at %#RGp -> marking not present\n", GCPhysPage)); 1802 pPteDst->u = PGM_PAGE_GET_HCPHYS(pPage); 1803 return; 1804 } 1805 # endif 1806 1697 1807 LogFlow(("SyncHandlerPte: MMIO page -> invalid \n")); 1698 1808 # if PGM_SHW_TYPE == PGM_TYPE_EPT … … 1826 1936 # endif 1827 1937 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 1828 PGM_BTH_NAME(SyncHandlerPte)(pVM, p Page, fGstShwPteFlags, &PteDst);1938 PGM_BTH_NAME(SyncHandlerPte)(pVM, pVCpu, pPage, GCPhysPage, fGstShwPteFlags, &PteDst); 1829 1939 else 1830 1940 { … … 2197 2307 SHWPTE PteDst; 2198 2308 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 2199 PGM_BTH_NAME(SyncHandlerPte)(pVM, p Page, GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, PdeSrc), &PteDst);2309 PGM_BTH_NAME(SyncHandlerPte)(pVM, pVCpu, pPage, GCPhys, GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, PdeSrc), &PteDst); 2200 2310 else 2201 2311 SHW_PTE_SET(PteDst, GST_GET_BIG_PDE_SHW_FLAGS_4_PTE(pVCpu, PdeSrc) | PGM_PAGE_GET_HCPHYS(pPage)); … … 2409 2519 2410 2520 #endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */ 2521 2522 2523 #if !defined(IN_RING3) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT 2524 /** 2525 * Sync a shadow page for a nested-guest page. 2526 * 2527 * @param pVCpu The cross context virtual CPU structure. 2528 * @param pPte The shadow page table entry. 2529 * @param GCPhysPage The guest-physical address of the page. 2530 * @param pShwPage The shadow page of the page table. 2531 * @param iPte The index of the page table entry. 2532 * @param pGstWalkAll The guest page table walk result. 2533 * 2534 * @note Not to be used for 2/4MB pages! 2535 */ 2536 static void PGM_BTH_NAME(NestedSyncPageWorker)(PVMCPUCC pVCpu, PSHWPTE pPte, RTGCPHYS GCPhysPage, PPGMPOOLPAGE pShwPage, 2537 unsigned iPte, PPGMPTWALKGST pGstWalkAll) 2538 { 2539 PGM_A20_ASSERT_MASKED(pVCpu, GCPhysPage); 2540 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage)); 2541 Assert(!pShwPage->fDirty); 2542 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT); 2543 2544 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 2545 AssertMsg(GCPhysPage == (pGstWalkAll->u.Ept.Pte.u & EPT_PTE_PG_MASK), 2546 ("GCPhys=%RGp Ept=%RX64\n", GCPhysPage, pGstWalkAll->u.Ept.Pte.u & EPT_PTE_PG_MASK)); 2547 2548 /* 2549 * Find the ram range. 2550 */ 2551 PPGMPAGE pPage; 2552 int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage); 2553 AssertRCReturnVoid(rc); 2554 2555 Assert(!PGM_PAGE_IS_BALLOONED(pPage)); 2556 2557 # ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC 2558 /* Make the page writable if necessary. 
*/ 2559 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM 2560 && PGM_PAGE_IS_ZERO(pPage) 2561 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED 2562 # ifdef VBOX_WITH_REAL_WRITE_MONITORED_PAGES 2563 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED 2564 # endif 2565 # ifdef VBOX_WITH_PAGE_SHARING 2566 && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_SHARED 2567 # endif 2568 ) 2569 { 2570 AssertMsgFailed(("GCPhysPage=%RGp\n", GCPhysPage)); /** @todo Shouldn't happen but if it does deal with it later. */ 2571 } 2572 # endif 2573 2574 /* 2575 * Make page table entry. 2576 */ 2577 SHWPTE Pte; 2578 uint64_t const fGstShwPteFlags = pGstWalkAll->u.Ept.Pte.u & pVCpu->pgm.s.fGstEptShadowedPteMask; 2579 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 2580 { 2581 if (CPUMIsGuestVmxApicAccessPageAddr(pVCpu, GCPhysPage)) 2582 { 2583 Pte.u = PGM_PAGE_GET_HCPHYS(pPage) | fGstShwPteFlags; 2584 Log7Func(("APIC-access page at %RGp -> shadowing nested-hypervisor %RX64 (%RGp)\n", GCPhysPage, fGstShwPteFlags, pShwPage->GCPhys)); 2585 } 2586 else if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 2587 { 2588 Assert(!CPUMIsGuestVmxApicAccessPageAddr(pVCpu, GCPhysPage)); 2589 if (fGstShwPteFlags & EPT_E_WRITE) 2590 { 2591 PGMHandlerPhysicalPageTempOff(pVCpu->CTX_SUFF(pVM), GCPhysPage, GCPhysPage); 2592 Log7Func(("monitored page (%R[pgmpage]) at %RGp -> read-write, monitoring disabled\n", pPage, GCPhysPage)); 2593 } 2594 Pte.u = PGM_PAGE_GET_HCPHYS(pPage) | fGstShwPteFlags; 2595 Log7Func(("monitored page (%R[pgmpage]) at %RGp -> shadowing nested-hypervisor %RX64\n", pPage, GCPhysPage, fGstShwPteFlags)); 2596 } 2597 else 2598 { 2599 /** @todo Track using fVirtVmxApicAccess bit in PGMPHYSHANDLER and maybe in PGMPAGE 2600 * too? */ 2601 PGMHandlerPhysicalDeregister(pVCpu->CTX_SUFF(pVM), GCPhysPage); 2602 Pte.u = PGM_PAGE_GET_HCPHYS(pPage) | fGstShwPteFlags; 2603 Log7Func(("MMIO at %RGp potentially former VMX APIC-access page -> unregistered\n", GCPhysPage)); 2604 } 2605 } 2606 else 2607 Pte.u = PGM_PAGE_GET_HCPHYS(pPage) | fGstShwPteFlags; 2608 2609 /* Make sure only allocated pages are mapped writable. */ 2610 Assert(!SHW_PTE_IS_P_RW(Pte) || PGM_PAGE_IS_ALLOCATED(pPage)); 2611 2612 /* 2613 * Keep user track up to date. 2614 */ 2615 if (SHW_PTE_IS_P(Pte)) 2616 { 2617 if (!SHW_PTE_IS_P(*pPte)) 2618 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPte); 2619 else if (SHW_PTE_GET_HCPHYS(*pPte) != SHW_PTE_GET_HCPHYS(Pte)) 2620 { 2621 Log2(("SyncPageWorker: deref! *pPte=%RX64 Pte=%RX64\n", SHW_PTE_LOG64(*pPte), SHW_PTE_LOG64(Pte))); 2622 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, SHW_PTE_GET_HCPHYS(*pPte), iPte, NIL_RTGCPHYS); 2623 PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPte); 2624 } 2625 } 2626 else if (SHW_PTE_IS_P(*pPte)) 2627 { 2628 Log2(("SyncPageWorker: deref! *pPte=%RX64\n", SHW_PTE_LOG64(*pPte))); 2629 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, SHW_PTE_GET_HCPHYS(*pPte), iPte, NIL_RTGCPHYS); 2630 } 2631 2632 /* 2633 * Commit the entry. 2634 */ 2635 SHW_PTE_ATOMIC_SET2(*pPte, Pte); 2636 return; 2637 } 2638 2639 2640 /** 2641 * Syncs a nested-guest page. 2642 * 2643 * There are no conflicts at this point, neither is there any need for 2644 * page table allocations. 2645 * 2646 * @returns VBox status code. 2647 * @param pVCpu The cross context virtual CPU structure. 2648 * @param GCPhysNestedPage The nested-guest physical address of the page being 2649 * synced. 
2650 * @param GCPhysPage The guest-physical address of the page being synced. 2651 * @param cPages Number of pages to sync (PGM_SYNC_N_PAGES) (default=1). 2652 * @param uErr The page fault error (X86_TRAP_PF_XXX). 2653 * @param pGstWalkAll The guest page table walk result. 2654 */ 2655 static int PGM_BTH_NAME(NestedSyncPage)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNestedPage, RTGCPHYS GCPhysPage, unsigned cPages, 2656 uint32_t uErr, PPGMPTWALKGST pGstWalkAll) 2657 { 2658 PGM_A20_ASSERT_MASKED(pVCpu, GCPhysPage); 2659 Assert(!(GCPhysNestedPage & GUEST_PAGE_OFFSET_MASK)); 2660 Assert(!(GCPhysPage & GUEST_PAGE_OFFSET_MASK)); 2661 2662 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 2663 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool); 2664 Log7Func(("GCPhysNestedPage=%RGv GCPhysPage=%RGp cPages=%u uErr=%#x\n", GCPhysNestedPage, GCPhysPage, cPages, uErr)); 2665 RT_NOREF_PV(uErr); RT_NOREF_PV(cPages); 2666 2667 PGM_LOCK_ASSERT_OWNER(pVM); 2668 2669 /* 2670 * Get the shadow PDE, find the shadow page table in the pool. 2671 */ 2672 unsigned const iPde = ((GCPhysNestedPage >> EPT_PD_SHIFT) & EPT_PD_MASK); 2673 PEPTPD pPd; 2674 int rc = pgmShwGetNestedEPTPDPtr(pVCpu, GCPhysNestedPage, NULL, &pPd, pGstWalkAll); 2675 if (RT_SUCCESS(rc)) 2676 { /* likely */ } 2677 else 2678 { 2679 Log(("Failed to fetch EPT PD for %RGp (%RGp) rc=%Rrc\n", GCPhysNestedPage, GCPhysPage, rc)); 2680 return rc; 2681 } 2682 Assert(pPd); 2683 EPTPDE Pde = pPd->a[iPde]; 2684 2685 # if 0 /* Enable this later? */ 2686 /* In the guest SMP case we could have blocked while another VCPU reused this page table. */ 2687 if (!SHW_PDE_IS_P(Pde)) 2688 { 2689 AssertMsg(pVM->cCpus > 1, ("Unexpected missing PDE %RX64\n", (uint64_t)Pde.u)); 2690 Log7Func(("CPU%d: SyncPage: Pde at %RGp changed behind our back!\n", pVCpu->idCpu, GCPhysNestedPage)); 2691 return VINF_SUCCESS; /* force the instruction to be executed again. */ 2692 } 2693 2694 /* Can happen in the guest SMP case; other VCPU activated this PDE while we were blocking to handle the page fault. */ 2695 if (SHW_PDE_IS_BIG(Pde)) 2696 { 2697 Assert(pVM->pgm.s.fNestedPaging); 2698 Log7Func(("CPU%d: SyncPage: %RGp changed behind our back!\n", pVCpu->idCpu, GCPhysNestedPage)); 2699 return VINF_SUCCESS; 2700 } 2701 # else 2702 AssertMsg(SHW_PDE_IS_P(Pde), ("Pde=%RX64 iPde=%u\n", Pde.u, iPde)); 2703 Assert(!SHW_PDE_IS_BIG(Pde)); 2704 # endif 2705 2706 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, Pde.u & EPT_PDE_PG_MASK); 2707 PEPTPT pPt = (PEPTPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage); 2708 2709 Assert(cPages == 1 || !(uErr & X86_TRAP_PF_P)); 2710 # ifdef PGM_SYNC_N_PAGES 2711 if ( cPages > 1 2712 && !(uErr & X86_TRAP_PF_P) 2713 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) 2714 { 2715 /* 2716 * This code path is currently only taken for non-present pages! 2717 * 2718 * We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and 2719 * deal with locality. 
2720 */ 2721 unsigned iPte = (GCPhysNestedPage >> SHW_PT_SHIFT) & SHW_PT_MASK; 2722 unsigned const iPteEnd = RT_MIN(iPte + PGM_SYNC_NR_PAGES / 2, RT_ELEMENTS(pPt->a)); 2723 if (iPte < PGM_SYNC_NR_PAGES / 2) 2724 iPte = 0; 2725 else 2726 iPte -= PGM_SYNC_NR_PAGES / 2; 2727 for (; iPte < iPteEnd; iPte++) 2728 { 2729 if (!SHW_PTE_IS_P(pPt->a[iPte])) 2730 { 2731 PGMPTWALKGST GstWalkPt; 2732 PGMPTWALK WalkPt; 2733 GCPhysNestedPage &= ~(SHW_PT_MASK << SHW_PT_SHIFT); 2734 GCPhysNestedPage |= (iPte << GUEST_PAGE_SHIFT); 2735 rc = pgmGstSlatWalk(pVCpu, GCPhysNestedPage, false /*fIsLinearAddrValid*/, 0 /*GCPtrNested*/, &WalkPt, 2736 &GstWalkPt); 2737 if (RT_SUCCESS(rc)) 2738 PGM_BTH_NAME(NestedSyncPageWorker)(pVCpu, &pPt->a[iPte], WalkPt.GCPhys, pShwPage, iPte, &GstWalkPt); 2739 else 2740 { 2741 /* 2742 * This could be MMIO pages reserved by the nested-hypevisor or genuinely not-present pages. 2743 * Ensure the shadow tables entry is not-present. 2744 */ 2745 /** @todo Potential room for optimization (explained in NestedSyncPT). */ 2746 AssertMsg(!pPt->a[iPte].u, ("%RX64\n", pPt->a[iPte].u)); 2747 } 2748 Log7Func(("Many: %RGp iPte=%u ShwPte=%RX64\n", GCPhysNestedPage, iPte, SHW_PTE_LOG64(pPt->a[iPte]))); 2749 if (RT_UNLIKELY(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))) 2750 break; 2751 } 2752 else 2753 { 2754 # ifdef VBOX_STRICT 2755 /* Paranoia - Verify address of the page is what it should be. */ 2756 PGMPTWALKGST GstWalkPt; 2757 PGMPTWALK WalkPt; 2758 GCPhysNestedPage &= ~(SHW_PT_MASK << SHW_PT_SHIFT); 2759 GCPhysNestedPage |= (iPte << GUEST_PAGE_SHIFT); 2760 rc = pgmGstSlatWalk(pVCpu, GCPhysNestedPage, false /*fIsLinearAddrValid*/, 0 /*GCPtrNested*/, &WalkPt, &GstWalkPt); 2761 AssertRC(rc); 2762 PPGMPAGE pPage; 2763 rc = pgmPhysGetPageEx(pVM, WalkPt.GCPhys, &pPage); 2764 AssertRC(rc); 2765 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == SHW_PTE_GET_HCPHYS(pPt->a[iPte]), 2766 ("PGM page and shadow PTE address conflict. GCPhysNestedPage=%RGp GCPhysPage=%RGp HCPhys=%RHp Shw=%RHp\n", 2767 GCPhysNestedPage, WalkPt.GCPhys, PGM_PAGE_GET_HCPHYS(pPage), SHW_PTE_GET_HCPHYS(pPt->a[iPte]))); 2768 # endif 2769 Log7Func(("Many3: %RGp iPte=%u ShwPte=%RX64\n", GCPhysNestedPage, iPte, SHW_PTE_LOG64(pPt->a[iPte]))); 2770 } 2771 } 2772 } 2773 else 2774 # endif /* PGM_SYNC_N_PAGES */ 2775 { 2776 unsigned const iPte = (GCPhysNestedPage >> SHW_PT_SHIFT) & SHW_PT_MASK; 2777 PGM_BTH_NAME(NestedSyncPageWorker)(pVCpu, &pPt->a[iPte], GCPhysPage, pShwPage, iPte, pGstWalkAll); 2778 Log7Func(("4K: GCPhysPage=%RGp iPte=%u ShwPte=%08llx\n", GCPhysPage, iPte, SHW_PTE_LOG64(pPt->a[iPte]))); 2779 } 2780 2781 return VINF_SUCCESS; 2782 } 2783 2784 2785 /** 2786 * Sync a shadow page table for a nested-guest page table. 2787 * 2788 * The shadow page table is not present in the shadow PDE. 2789 * 2790 * Handles mapping conflicts. 2791 * 2792 * A precondition for this method is that the shadow PDE is not present. The 2793 * caller must take the PGM lock before checking this and continue to hold it 2794 * when calling this method. 2795 * 2796 * @returns VBox status code. 2797 * @param pVCpu The cross context virtual CPU structure. 2798 * @param GCPhysNestedPage The nested-guest physical page address of the page 2799 * being synced. 2800 * @param GCPhysPage The guest-physical address of the page being synced. 2801 * @param pGstWalkAll The guest page table walk result. 
2802 */ 2803 static int PGM_BTH_NAME(NestedSyncPT)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNestedPage, RTGCPHYS GCPhysPage, PPGMPTWALKGST pGstWalkAll) 2804 { 2805 PGM_A20_ASSERT_MASKED(pVCpu, GCPhysPage); 2806 Assert(!(GCPhysNestedPage & GUEST_PAGE_OFFSET_MASK)); 2807 Assert(!(GCPhysPage & GUEST_PAGE_OFFSET_MASK)); 2808 2809 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 2810 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 2811 2812 Log7Func(("GCPhysNestedPage=%RGp GCPhysPage=%RGp\n", GCPhysNestedPage, GCPhysPage)); 2813 2814 PGM_LOCK_ASSERT_OWNER(pVM); 2815 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a); 2816 2817 PEPTPD pPd; 2818 PEPTPDPT pPdpt; 2819 unsigned const iPde = (GCPhysNestedPage >> EPT_PD_SHIFT) & EPT_PD_MASK; 2820 int rc = pgmShwGetNestedEPTPDPtr(pVCpu, GCPhysNestedPage, &pPdpt, &pPd, pGstWalkAll); 2821 if (rc != VINF_SUCCESS) 2822 { 2823 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a); 2824 AssertRC(rc); 2825 return rc; 2826 } 2827 Assert(pPd); 2828 PSHWPDE pPde = &pPd->a[iPde]; 2829 2830 unsigned const iPdpt = (GCPhysNestedPage >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK; 2831 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdpt->a[iPdpt].u & EPT_PDPTE_PG_MASK); 2832 Assert(pShwPde->enmKind == PGMPOOLKIND_EPT_PD_FOR_EPT_PD); 2833 2834 SHWPDE Pde = *pPde; 2835 Assert(!SHW_PDE_IS_P(Pde)); /* We're only supposed to call SyncPT on PDE!P and conflicts. */ 2836 2837 # ifdef PGM_WITH_LARGE_PAGES 2838 if (BTH_IS_NP_ACTIVE(pVM)) 2839 { 2840 /* Check if we allocated a big page before for this 2 MB range and disable it. */ 2841 PPGMPAGE pPage; 2842 rc = pgmPhysGetPageEx(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK, &pPage); 2843 if ( RT_SUCCESS(rc) 2844 && PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE) 2845 { 2846 Log7Func(("Disabling large page %RGp\n", GCPhysPage)); 2847 Assert(PGM_A20_IS_ENABLED(pVCpu)); /* Should never be in A20M mode in VMX operation. */ 2848 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED); 2849 pVM->pgm.s.cLargePagesDisabled++; 2850 } 2851 } 2852 # endif /* PGM_WITH_LARGE_PAGES */ 2853 2854 /* 2855 * Allocate & map the page table. 2856 */ 2857 PSHWPT pPt; 2858 PPGMPOOLPAGE pShwPage; 2859 2860 RTGCPHYS const GCPhysPt = pGstWalkAll->u.Ept.Pde.u & EPT_PDE_PG_MASK; 2861 rc = pgmPoolAlloc(pVM, GCPhysPt, PGMPOOLKIND_EPT_PT_FOR_EPT_PT, PGMPOOLACCESS_DONTCARE, 2862 PGM_A20_IS_ENABLED(pVCpu), pShwPde->idx, iPde, false /*fLockPage*/, &pShwPage); 2863 if ( rc == VINF_SUCCESS 2864 || rc == VINF_PGM_CACHED_PAGE) 2865 { /* likely */ } 2866 else 2867 { 2868 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a); 2869 AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS); 2870 } 2871 2872 pPt = (PSHWPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage); 2873 Assert(pPt); 2874 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage)); 2875 2876 if (rc == VINF_SUCCESS) 2877 { 2878 /* Sync the page we've already translated through SLAT. */ 2879 const unsigned iPte = (GCPhysNestedPage >> SHW_PT_SHIFT) & SHW_PT_MASK; 2880 Assert((pGstWalkAll->u.Ept.Pte.u & EPT_PTE_PG_MASK) == GCPhysPage); 2881 PGM_BTH_NAME(NestedSyncPageWorker)(pVCpu, &pPt->a[iPte], GCPhysPage, pShwPage, iPte, pGstWalkAll); 2882 Log7Func(("GstPte=%RGp ShwPte=%RX64 iPte=%u\n", pGstWalkAll->u.Ept.Pte.u, pPt->a[iPte].u, iPte)); 2883 2884 /* Sync the rest of page table (expensive but might be cheaper than nested-guest VM-exits in hardware). 
*/ 2885 for (unsigned iPteCur = 0; iPteCur < RT_ELEMENTS(pPt->a); iPteCur++) 2886 { 2887 if (iPteCur != iPte) 2888 { 2889 PGMPTWALKGST GstWalkPt; 2890 PGMPTWALK WalkPt; 2891 GCPhysNestedPage &= ~(SHW_PT_MASK << SHW_PT_SHIFT); 2892 GCPhysNestedPage |= (iPteCur << GUEST_PAGE_SHIFT); 2893 int const rc2 = pgmGstSlatWalk(pVCpu, GCPhysNestedPage, false /*fIsLinearAddrValid*/, 0 /*GCPtrNested*/, 2894 &WalkPt, &GstWalkPt); 2895 if (RT_SUCCESS(rc2)) 2896 { 2897 PGM_BTH_NAME(NestedSyncPageWorker)(pVCpu, &pPt->a[iPteCur], WalkPt.GCPhys, pShwPage, iPteCur, &GstWalkPt); 2898 Log7Func(("GstPte=%RGp ShwPte=%RX64 iPte=%u\n", GstWalkPt.u.Ept.Pte.u, pPt->a[iPteCur].u, iPteCur)); 2899 } 2900 else 2901 { 2902 /* 2903 * This could be MMIO pages reserved by the nested-hypevisor or genuinely not-present pages. 2904 * Ensure the shadow tables entry is not-present. 2905 */ 2906 /** @todo We currently don't sync. them to cause EPT misconfigs and trap all of them 2907 * using EPT violation and walk the guest EPT tables to determine EPT 2908 * misconfigs VM-exits for the nested-guest. In the future we could optimize 2909 * this by using a specific combination of reserved bits which we can 2910 * immediately identify as EPT misconfigs for the nested-guest without having 2911 * to walk its EPT tables. Tracking non-present entries might be tricky... 2912 */ 2913 AssertMsg(!pPt->a[iPteCur].u, ("%RX64\n", pPt->a[iPteCur].u)); 2914 } 2915 if (RT_UNLIKELY(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))) 2916 break; 2917 } 2918 } 2919 } 2920 else 2921 { 2922 Assert(rc == VINF_PGM_CACHED_PAGE); 2923 # ifdef VBOX_STRICT 2924 /* Paranoia - Verify address of the page is what it should be. */ 2925 PPGMPAGE pPage; 2926 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage); 2927 AssertRC(rc); 2928 const unsigned iPte = (GCPhysNestedPage >> SHW_PT_SHIFT) & SHW_PT_MASK; 2929 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == SHW_PTE_GET_HCPHYS(pPt->a[iPte]), 2930 ("PGM page and shadow PTE address conflict. GCPhysNestedPage=%RGp GCPhysPage=%RGp Page=%RHp Shw=%RHp\n", 2931 GCPhysNestedPage, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage), SHW_PTE_GET_HCPHYS(pPt->a[iPte]))); 2932 Log7Func(("GstPte=%RGp ShwPte=%RX64 iPte=%u [cache]\n", pGstWalkAll->u.Ept.Pte.u, pPt->a[iPte].u, iPte)); 2933 # endif 2934 rc = VINF_SUCCESS; /* Cached entry; assume it's still fully valid. */ 2935 } 2936 2937 /* Save the new PDE. */ 2938 uint64_t const fShwPdeFlags = pGstWalkAll->u.Ept.Pde.u & pVCpu->pgm.s.fGstEptShadowedPdeMask; 2939 AssertReturn(!(pGstWalkAll->u.Ept.Pde.u & EPT_E_LEAF), VERR_NOT_SUPPORTED); /* Implement this later. */ 2940 Pde.u = pShwPage->Core.Key | fShwPdeFlags; 2941 SHW_PDE_ATOMIC_SET2(*pPde, Pde); 2942 Log7Func(("GstPde=%RGp ShwPde=%RX64 iPde=%u\n", pGstWalkAll->u.Ept.Pde.u, pPde->u, iPde)); 2943 2944 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT), a); 2945 return rc; 2946 } 2947 #endif /* !IN_RING3 && VBOX_WITH_NESTED_HWVIRT_VMX_EPT && PGM_SHW_TYPE == PGM_TYPE_EPT*/ 2948 2949 2411 2950 #if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE 2412 2951 … … 2934 3473 2935 3474 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 2936 PGM_BTH_NAME(SyncHandlerPte)(pVM, p Page, SHW_PTE_GET_U(PteDstBase), &PteDst);3475 PGM_BTH_NAME(SyncHandlerPte)(pVM, pVCpu, pPage, GCPhys, SHW_PTE_GET_U(PteDstBase), &PteDst); 2937 3476 else if (PGM_PAGE_IS_BALLOONED(pPage)) 2938 3477 SHW_PTE_SET(PteDst, 0); /* Handle ballooned pages at #PF time. 
*/ … … 4392 4931 X86PDPE aGstPaePdpes[X86_PG_PAE_PDPE_ENTRIES]; 4393 4932 memcpy(&aGstPaePdpes, HCPtrGuestCR3, sizeof(aGstPaePdpes)); 4394 CPUMSetGuestPaePdpes(pVCpu, &aGstPaePdpes[0]);4395 4933 PGMGstMapPaePdpes(pVCpu, &aGstPaePdpes[0]); 4396 4934 … … 4513 5051 #endif 4514 5052 4515 /* 4516 * Update second-level address translation info. 4517 */ 4518 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 4519 pVCpu->pgm.s.pGstEptPml4R3 = 0; 4520 pVCpu->pgm.s.pGstEptPml4R0 = 0; 4521 #endif 4522 5053 /** @todo This should probably be moved inside \#if PGM_GST_TYPE == PGM_TYPE_PAE? */ 4523 5054 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false; 4524 5055 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false; -
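The NestedSyncPage code above does not sync only the faulting PTE; it scans a small window of entries around it (the PGM_SYNC_NR_PAGES / 2 block) and clamps that window to the table bounds. Below is a minimal standalone sketch of just that index clamping, not VirtualBox code: SYNC_NR_PAGES and PT_ENTRIES are assumed stand-ins for PGM_SYNC_NR_PAGES and the 512-entry EPT page table.

    /* Sketch: compute the clamped [iFirst, iEnd) window of PTE indices that the
     * nested sync-page path walks around the faulting entry. */
    #include <stdio.h>

    #define SYNC_NR_PAGES 8u    /* assumed stand-in for PGM_SYNC_NR_PAGES */
    #define PT_ENTRIES    512u  /* entries in an EPT page table */

    static void sync_window(unsigned iPteFault, unsigned *piFirst, unsigned *piEnd)
    {
        unsigned iEnd = iPteFault + SYNC_NR_PAGES / 2u;
        if (iEnd > PT_ENTRIES)
            iEnd = PT_ENTRIES;                          /* clamp at the table end */
        *piEnd   = iEnd;
        *piFirst = iPteFault < SYNC_NR_PAGES / 2u       /* clamp at the table start */
                 ? 0u : iPteFault - SYNC_NR_PAGES / 2u;
    }

    int main(void)
    {
        unsigned iFirst, iEnd;
        sync_window(2u, &iFirst, &iEnd);
        printf("fault at 2   -> sync [%u, %u)\n", iFirst, iEnd);   /* [0, 6)    */
        sync_window(510u, &iFirst, &iEnd);
        printf("fault at 510 -> sync [%u, %u)\n", iFirst, iEnd);   /* [506, 512) */
        return 0;
    }

Near either end of the table the window simply shrinks instead of wrapping, which is why the end index is computed from the faulting index before the start index is adjusted, matching the order used in the changeset.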
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r96407 r96879 191 191 (RTGCPTR)(CTXTYPE(RTGCPTR, uintptr_t, RTGCPTR))(uintptr_t)pvAddress, GCPhysFault, cbWrite)); 192 192 193 if (PGMPOOL_PAGE_IS_NESTED(pPage)) 194 Log7Func(("%RGv phys=%RGp cbWrite=%d\n", 195 (RTGCPTR)(CTXTYPE(RTGCPTR, uintptr_t, RTGCPTR))(uintptr_t)pvAddress, GCPhysFault, cbWrite)); 196 193 197 for (;;) 194 198 { 195 199 union 196 200 { 197 201 void *pv; 198 202 PX86PT pPT; … … 202 206 PX86PDPT pPDPT; 203 207 PX86PML4 pPML4; 204 } uShw; 208 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 209 PEPTPDPT pEptPdpt; 210 PEPTPD pEptPd; 211 PEPTPT pEptPt; 212 #endif 213 } uShw; 205 214 206 215 LogFlow(("pgmPoolMonitorChainChanging: page idx=%d phys=%RGp (next=%d) kind=%s write=%#x\n", … … 563 572 } 564 573 574 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 575 case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4: 576 { 577 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 578 const unsigned iShw = off / sizeof(EPTPML4E); 579 X86PGPAEUINT const uPml4e = uShw.pPML4->a[iShw].u; 580 if (uPml4e & EPT_PRESENT_MASK) 581 { 582 Log7Func(("PML4 iShw=%#x: %RX64 (%RGp) -> freeing it!\n", iShw, uPml4e, pPage->GCPhys)); 583 pgmPoolFree(pVM, uPml4e & X86_PML4E_PG_MASK, pPage->idx, iShw); 584 ASMAtomicWriteU64(&uShw.pPML4->a[iShw].u, 0); 585 } 586 587 /* paranoia / a bit assumptive. */ 588 if ( (off & 7) 589 && (off & 7) + cbWrite > sizeof(X86PML4E)) 590 { 591 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PML4E); 592 X86PGPAEUINT const uPml4e2 = uShw.pPML4->a[iShw2].u; 593 if (uPml4e2 & EPT_PRESENT_MASK) 594 { 595 Log7Func(("PML4 iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uPml4e2)); 596 pgmPoolFree(pVM, uPml4e2 & X86_PML4E_PG_MASK, pPage->idx, iShw2); 597 ASMAtomicWriteU64(&uShw.pPML4->a[iShw2].u, 0); 598 } 599 } 600 break; 601 } 602 603 case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT: 604 { 605 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 606 const unsigned iShw = off / sizeof(EPTPDPTE); 607 X86PGPAEUINT const uPdpte = uShw.pEptPdpt->a[iShw].u; 608 if (uPdpte & EPT_PRESENT_MASK) 609 { 610 Log7Func(("EPT PDPT iShw=%#x: %RX64 (%RGp) -> freeing it!\n", iShw, uPdpte, pPage->GCPhys)); 611 pgmPoolFree(pVM, uPdpte & EPT_PDPTE_PG_MASK, pPage->idx, iShw); 612 ASMAtomicWriteU64(&uShw.pEptPdpt->a[iShw].u, 0); 613 } 614 615 /* paranoia / a bit assumptive. */ 616 if ( (off & 7) 617 && (off & 7) + cbWrite > sizeof(EPTPDPTE)) 618 { 619 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(EPTPDPTE); 620 X86PGPAEUINT const uPdpte2 = uShw.pEptPdpt->a[iShw2].u; 621 if (uPdpte2 & EPT_PRESENT_MASK) 622 { 623 Log7Func(("EPT PDPT iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uPdpte2)); 624 pgmPoolFree(pVM, uPdpte2 & EPT_PDPTE_PG_MASK, pPage->idx, iShw2); 625 ASMAtomicWriteU64(&uShw.pEptPdpt->a[iShw2].u, 0); 626 } 627 } 628 break; 629 } 630 631 case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: 632 { 633 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 634 const unsigned iShw = off / sizeof(EPTPDE); 635 X86PGPAEUINT const uPde = uShw.pEptPd->a[iShw].u; 636 if (uPde & EPT_PRESENT_MASK) 637 { 638 Log7Func(("EPT PD iShw=%#x: %RX64 (%RGp) -> freeing it!\n", iShw, uPde, pPage->GCPhys)); 639 pgmPoolFree(pVM, uPde & EPT_PDE_PG_MASK, pPage->idx, iShw); 640 ASMAtomicWriteU64(&uShw.pEptPd->a[iShw].u, 0); 641 } 642 643 /* paranoia / a bit assumptive. 
*/ 644 if ( (off & 7) 645 && (off & 7) + cbWrite > sizeof(EPTPDE)) 646 { 647 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(EPTPDE); 648 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pEptPd->a)); 649 X86PGPAEUINT const uPde2 = uShw.pEptPd->a[iShw2].u; 650 if (uPde2 & EPT_PRESENT_MASK) 651 { 652 Log7Func(("EPT PD (2): iShw2=%#x: %RX64 (%RGp) -> freeing it!\n", iShw2, uPde2, pPage->GCPhys)); 653 pgmPoolFree(pVM, uPde2 & EPT_PDE_PG_MASK, pPage->idx, iShw2); 654 ASMAtomicWriteU64(&uShw.pEptPd->a[iShw2].u, 0); 655 } 656 } 657 break; 658 } 659 660 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 661 { 662 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage); 663 const unsigned iShw = off / sizeof(EPTPTE); 664 X86PGPAEUINT const uPte = uShw.pEptPt->a[iShw].u; 665 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT)); 666 if (uPte & EPT_PRESENT_MASK) 667 { 668 EPTPTE GstPte; 669 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte)); 670 AssertRC(rc); 671 672 Log7Func(("EPT PT: iShw=%#x %RX64 (%RGp)\n", iShw, uPte, pPage->GCPhys)); 673 pgmPoolTracDerefGCPhysHint(pPool, pPage, 674 uShw.pEptPt->a[iShw].u & EPT_PTE_PG_MASK, 675 GstPte.u & EPT_PTE_PG_MASK, 676 iShw); 677 ASMAtomicWriteU64(&uShw.pEptPt->a[iShw].u, 0); 678 } 679 680 /* paranoia / a bit assumptive. */ 681 if ( (off & 7) 682 && (off & 7) + cbWrite > sizeof(EPTPTE)) 683 { 684 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(EPTPTE); 685 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pEptPt->a)); 686 X86PGPAEUINT const uPte2 = uShw.pEptPt->a[iShw2].u; 687 if (uPte2 & EPT_PRESENT_MASK) 688 { 689 EPTPTE GstPte; 690 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, 691 pvAddress ? (uint8_t const *)pvAddress + sizeof(GstPte) : NULL, 692 GCPhysFault + sizeof(GstPte), sizeof(GstPte)); 693 AssertRC(rc); 694 Log7Func(("EPT PT (2): iShw=%#x %RX64 (%RGp)\n", iShw2, uPte2, pPage->GCPhys)); 695 pgmPoolTracDerefGCPhysHint(pPool, pPage, 696 uShw.pEptPt->a[iShw2].u & EPT_PTE_PG_MASK, 697 GstPte.u & EPT_PTE_PG_MASK, 698 iShw2); 699 ASMAtomicWriteU64(&uShw.pEptPt->a[iShw2].u, 0); 700 } 701 } 702 break; 703 } 704 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */ 705 565 706 default: 566 707 AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind)); … … 959 1100 RT_NOREF_PV(uErrorCode); 960 1101 1102 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 1103 AssertMsg(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT, 1104 ("pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault)); 1105 # endif 961 1106 LogFlow(("pgmRZPoolAccessPfHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault)); 962 1107 … … 973 1118 if (pPage->fDirty) 974 1119 { 1120 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 1121 Assert(!PGMPOOL_PAGE_IS_NESTED(pPage)); 1122 # endif 975 1123 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH)); 976 1124 PGM_UNLOCK(pVM); … … 988 1136 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst); 989 1137 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw); 1138 } 1139 # endif 1140 1141 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 1142 if (PGMPOOL_PAGE_IS_NESTED(pPage)) 1143 { 1144 Assert(!CPUMIsGuestInVmxNonRootMode(CPUMQueryGuestCtxPtr(pVCpu))); 1145 Log7Func(("Flushing pvFault=%RGv GCPhysFault=%RGp\n", pvFault, GCPhysFault)); 1146 pgmPoolMonitorChainFlush(pPool, pPage); 1147 PGM_UNLOCK(pVM); 1148 return VINF_SUCCESS; 990 1149 } 991 1150 # endif … … 1237 1396 && fReused) 1238 1397 { 1398 Assert(!PGMPOOL_PAGE_IS_NESTED(pPage)); /* temporary, remove later. 
*/ 1239 1399 /* Make sure that the current instruction still has shadow page backing, otherwise we'll end up in a loop. */ 1240 1400 if (PGMShwGetPage(pVCpu, pRegFrame->rip, NULL, NULL) == VINF_SUCCESS) … … 1681 1841 (PCX86PTPAE)&pPool->aDirtyPages[idxSlot].aPage[0], fAllowRemoval, &fFlush); 1682 1842 else 1843 { 1844 Assert(!PGMPOOL_PAGE_IS_NESTED(pPage)); /* temporary, remove later. */ 1683 1845 cChanges = pgmPoolTrackFlushPTPae32Bit(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PT)pvGst, 1684 1846 (PCX86PT)&pPool->aDirtyPages[idxSlot].aPage[0], fAllowRemoval, &fFlush); 1847 } 1685 1848 1686 1849 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst); … … 1728 1891 AssertCompile(RT_ELEMENTS(pPool->aDirtyPages) == 8 || RT_ELEMENTS(pPool->aDirtyPages) == 16); 1729 1892 Assert(!pPage->fDirty); 1893 Assert(!PGMPOOL_PAGE_IS_NESTED(pPage)); 1730 1894 1731 1895 unsigned idxFree = pPool->idxFreeDirtyPage; … … 2104 2268 case PGMPOOLKIND_32BIT_PD: 2105 2269 case PGMPOOLKIND_PAE_PDPT: 2270 Assert(!PGMPOOL_PAGE_IS_KIND_NESTED(enmKind2)); 2106 2271 switch (enmKind2) 2107 2272 { … … 2133 2298 case PGMPOOLKIND_64BIT_PML4: 2134 2299 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB: 2300 Assert(!PGMPOOL_PAGE_IS_KIND_NESTED(enmKind2)); 2135 2301 switch (enmKind2) 2136 2302 { … … 2155 2321 } 2156 2322 2323 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 2324 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 2325 case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: 2326 case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT: 2327 return PGMPOOL_PAGE_IS_KIND_NESTED(enmKind2); 2328 2329 case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4: 2330 return false; 2331 #endif 2332 2157 2333 /* 2158 2334 * These cannot be flushed, and it's common to reuse the PDs as PTs. … … 2368 2544 case PGMPOOLKIND_32BIT_PD: 2369 2545 case PGMPOOLKIND_PAE_PDPT: 2546 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 2547 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 2548 case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: 2549 case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT: 2550 #endif 2370 2551 { 2371 2552 /* find the head */ … … 2394 2575 case PGMPOOLKIND_32BIT_PD_PHYS: 2395 2576 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT: 2577 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 2578 case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4: 2579 #endif 2396 2580 break; 2397 2581 default: … … 2459 2643 /* Nothing to monitor here. */ 2460 2644 return VINF_SUCCESS; 2645 2646 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 2647 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 2648 case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: 2649 case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT: 2650 break; 2651 2652 case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4: 2653 /* Nothing to monitor here. */ 2654 return VINF_SUCCESS; 2655 #endif 2656 2461 2657 default: 2462 2658 AssertFatalMsgFailed(("This can't happen! 
enmKind=%d\n", pPage->enmKind)); … … 2484 2680 pPageHead->iMonitoredNext = pPage->idx; 2485 2681 rc = VINF_SUCCESS; 2682 if (PGMPOOL_PAGE_IS_NESTED(pPage)) 2683 Log7Func(("Adding to monitoring list GCPhysPage=%RGp\n", pPage->GCPhys)); 2486 2684 } 2487 2685 else 2488 2686 { 2687 if (PGMPOOL_PAGE_IS_NESTED(pPage)) 2688 Log7Func(("Started monitoring GCPhysPage=%RGp HCPhys=%RHp enmKind=%s\n", pPage->GCPhys, pPage->Core.Key, pgmPoolPoolKindToStr(pPage->enmKind))); 2689 2489 2690 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX); Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX); 2490 2691 PVMCC pVM = pPool->CTX_SUFF(pVM); … … 2551 2752 return VINF_SUCCESS; 2552 2753 2754 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 2755 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 2756 case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: 2757 case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT: 2758 break; 2759 2760 case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4: 2761 /* Nothing to monitor here. */ 2762 Assert(!pPage->fMonitored); 2763 return VINF_SUCCESS; 2764 #endif 2765 2553 2766 default: 2554 2767 AssertFatalMsgFailed(("This can't happen! enmKind=%d\n", pPage->enmKind)); … … 2599 2812 */ 2600 2813 pgmPoolMonitorModifiedRemove(pPool, pPage); 2814 2815 if (PGMPOOL_PAGE_IS_NESTED(pPage)) 2816 Log7Func(("Stopped monitoring %RGp\n", pPage->GCPhys)); 2601 2817 2602 2818 return rc; … … 3173 3389 break; 3174 3390 default: 3175 /* (shouldn't be here, will assert below)*/3391 /* We will end up here when called with an "ALL" access handler. */ 3176 3392 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry); 3177 3393 break; … … 3197 3413 if (Pte.u & PGM_PTFLAGS_TRACK_DIRTY) 3198 3414 Pte.u &= ~(X86PGUINT)X86_PTE_RW; /* need to disallow writes when dirty bit tracking is still active. */ 3415 3199 3416 ASMAtomicWriteU32(&pPT->a[iPte].u, Pte.u); 3200 3417 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT); … … 3220 3437 case PGMPOOLKIND_PAE_PT_FOR_PHYS: 3221 3438 case PGMPOOLKIND_EPT_PT_FOR_PHYS: /* physical mask the same as PAE; RW bit as well; be careful! */ 3439 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 3440 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 3441 #endif 3222 3442 { 3223 3443 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P; … … 3246 3466 3247 3467 default: 3248 /* (shouldn't be here, will assert below)*/3468 /* We will end up here when called with an "ALL" access handler. 
*/ 3249 3469 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry); 3250 3470 break; … … 3594 3814 && pPage->cPresent) 3595 3815 { 3816 Assert(!PGMPOOL_PAGE_IS_NESTED(pPage)); /* see if it hits */ 3596 3817 switch (pPage->enmKind) 3597 3818 { … … 3790 4011 break; 3791 4012 4013 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 4014 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 4015 case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: 4016 case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT: 4017 case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4: 4018 Assert(iUserTable < EPT_PG_ENTRIES); 4019 break; 4020 # endif 4021 3792 4022 default: 3793 AssertMsgFailed(("enmKind=%d \n", pUserPage->enmKind));4023 AssertMsgFailed(("enmKind=%d GCPhys=%RGp\n", pUserPage->enmKind, pPage->GCPhys)); 3794 4024 break; 3795 4025 } … … 3825 4055 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS: 3826 4056 case PGMPOOLKIND_EPT_PD_FOR_PHYS: 4057 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 4058 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 4059 case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: 4060 case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT: 4061 case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4: 4062 #endif 3827 4063 ASMAtomicWriteU64(&u.pau64[iUserTable], 0); 3828 4064 break; … … 4204 4440 } 4205 4441 4206 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp; found page has HCPhys=%RHp \n",4207 HCPhys, GCPhys, PGM_PAGE_GET_HCPHYS(pPhysPage) ));4442 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp; found page has HCPhys=%RHp iPte=%u fIsNested=%RTbool\n", 4443 HCPhys, GCPhys, PGM_PAGE_GET_HCPHYS(pPhysPage), iPte, PGMPOOL_PAGE_IS_NESTED(pPage))); 4208 4444 } 4209 4445 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp\n", HCPhys, GCPhys)); … … 4438 4674 } 4439 4675 4676 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 4677 /** 4678 * Clears references to shadowed pages in a SLAT EPT page table. 4679 * 4680 * @param pPool The pool. 4681 * @param pPage The page. 4682 * @param pShwPT The shadow page table (mapping of the page). 4683 * @param pGstPT The guest page table. 4684 */ 4685 DECLINLINE(void) pgmPoolTrackDerefNestedPTEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPT pShwPT, PCEPTPT pGstPT) 4686 { 4687 Assert(PGMPOOL_PAGE_IS_NESTED(pPage)); 4688 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++) 4689 { 4690 X86PGPAEUINT const uShwPte = pShwPT->a[i].u; 4691 Assert((uShwPte & UINT64_C(0xfff0000000000f80)) == 0); /* Access, Dirty, UserX (not supported) and ignored bits 7, 11. */ 4692 if (uShwPte & EPT_PRESENT_MASK) 4693 { 4694 Log7Func(("Shw=%RX64 GstPte=%RX64\n", uShwPte, pGstPT->a[i].u)); 4695 pgmPoolTracDerefGCPhys(pPool, pPage, uShwPte & EPT_PTE_PG_MASK, pGstPT->a[i].u & EPT_PTE_PG_MASK, i); 4696 if (!pPage->cPresent) 4697 break; 4698 } 4699 } 4700 } 4701 4702 4703 # if 0 4704 /** 4705 * Clears refernces to shadowed pages in a SLAT EPT PM4 table. 4706 * 4707 * @param pPool The pool. 4708 * @param pPage The page. 4709 * @param pShwPml4 The shadow PML4 table. 4710 */ 4711 DECLINLINE(void) pgmPoolTrackDerefNestedPML4EPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPML4 pShwPml4) 4712 { 4713 /** @todo later merge this with 64-bit PML and pass the assert and present masks as 4714 * parameters. 
*/ 4715 Assert(PGMPOOL_PAGE_IS_NESTED(pPage)); 4716 for (unsigned i = 0; i < RT_ELEMENTS(pShwPml4->a); i++) 4717 { 4718 X86PGPAEUINT const uPml4e = pShwPml4->a[i].u; 4719 Assert((uPml4e & (EPT_PML4E_MBZ_MASK | UINT64_C(0xfff0000000000000))) == 0); 4720 if (uPml4e & EPT_PRESENT_MASK) 4721 { 4722 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, uPml4e & EPT_PML4E_PG_MASK); 4723 if (pSubPage) 4724 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i); 4725 else 4726 AssertFatalMsgFailed(("%RX64\n", uPml4e & EPT_PML4E_PG_MASK)); 4727 } 4728 } 4729 } 4730 # endif 4731 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */ 4440 4732 4441 4733 /** … … 4591 4883 { 4592 4884 X86PGPAEUINT const uPde = pShwPD->a[i].u; 4593 Assert((uPde & UINT64_C(0xfff0000000000f80)) == 0); 4885 #ifdef PGM_WITH_LARGE_PAGES 4886 AssertMsg((uPde & UINT64_C(0xfff0000000000f00)) == 0, ("uPde=%RX64\n", uPde)); 4887 #else 4888 AssertMsg((uPde & UINT64_C(0xfff0000000000f80)) == 0, ("uPde=%RX64\n", uPde)); 4889 #endif 4594 4890 if (uPde & EPT_E_READ) 4595 4891 { … … 4599 4895 Log4(("pgmPoolTrackDerefPDEPT: i=%d pde=%RX64 GCPhys=%RX64\n", 4600 4896 i, uPde & EPT_PDE2M_PG_MASK, pPage->GCPhys)); 4897 Assert(!PGMPOOL_PAGE_IS_NESTED(pPage)); /* We don't support large guest EPT yet. */ 4601 4898 pgmPoolTracDerefGCPhys(pPool, pPage, uPde & EPT_PDE2M_PG_MASK, 4602 4899 pPage->GCPhys + i * 2 * _1M /* pPage->GCPhys = base address of the memory described by the PD */, … … 4756 5053 break; 4757 5054 5055 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT 5056 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 5057 { 5058 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g); 5059 void *pvGst; 5060 int const rc = PGM_GCPHYS_2_PTR(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc); 5061 pgmPoolTrackDerefNestedPTEPT(pPool, pPage, (PEPTPT)pvShw, (PCEPTPT)pvGst); 5062 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst); 5063 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g); 5064 break; 5065 } 5066 5067 case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: 5068 pgmPoolTrackDerefPDEPT(pPool, pPage, (PEPTPD)pvShw); 5069 break; 5070 5071 case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT: 5072 pgmPoolTrackDerefPDPTEPT(pPool, pPage, (PEPTPDPT)pvShw); 5073 break; 5074 5075 case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4: 5076 //pgmPoolTrackDerefNestedPML4EPT(pPool, pPage, (PEPTPML4)pvShw); 5077 RT_FALL_THRU(); 5078 #endif 5079 4758 5080 default: 4759 AssertFatalMsgFailed(("enmKind=%d \n", pPage->enmKind));5081 AssertFatalMsgFailed(("enmKind=%d GCPhys=%RGp\n", pPage->enmKind, pPage->GCPhys)); 4760 5082 } 4761 5083 … … 4790 5112 LogFlow(("pgmPoolFlushPage: pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%s, .GCPhys=%RGp}\n", 4791 5113 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys)); 5114 5115 if (PGMPOOL_PAGE_IS_NESTED(pPage)) 5116 Log7Func(("pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%s, .GCPhys=%RGp}\n", 5117 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys)); 4792 5118 4793 5119 /* … … 4989 5315 * (TRPMR3SyncIDT) because of FF priority. Try fix that? 4990 5316 * Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)); */ 5317 5318 #if defined(VBOX_STRICT) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) 5319 PVMCPUCC pVCpu = VMMGetCpu(pVM); 5320 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT || PGMPOOL_PAGE_IS_KIND_NESTED(enmKind)); 5321 #endif 4991 5322 4992 5323 PGM_LOCK_VOID(pVM); … … 5200 5531 if (pPage->GCPhys - GCPhys < PAGE_SIZE) 5201 5532 { 5533 Assert(!PGMPOOL_PAGE_IS_NESTED(pPage)); /* Temporary to see if it hits. 
Remove later. */ 5202 5534 switch (pPage->enmKind) 5203 5535 { … … 5503 5835 case PGMPOOLKIND_ROOT_NESTED: 5504 5836 return "PGMPOOLKIND_ROOT_NESTED"; 5837 case PGMPOOLKIND_EPT_PT_FOR_EPT_PT: 5838 return "PGMPOOLKIND_EPT_PT_FOR_EPT_PT"; 5839 case PGMPOOLKIND_EPT_PD_FOR_EPT_PD: 5840 return "PGMPOOLKIND_EPT_PD_FOR_EPT_PD"; 5841 case PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT: 5842 return "PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT"; 5843 case PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4: 5844 return "PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4"; 5505 5845 } 5506 5846 return "Unknown kind!"; -
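The new PGMPOOLKIND_EPT_* cases added to pgmPoolMonitorChainChanging all follow the same pattern: derive the first shadow entry touched from the write offset, and, when a misaligned write straddles an entry boundary, also invalidate the following entry (the "paranoia / a bit assumptive" check). A minimal sketch of that index arithmetic, assuming 8-byte EPT entries; the names below are illustrative, not VirtualBox symbols.

    /* Sketch: map a monitored guest write at byte offset 'off' of size 'cbWrite'
     * onto the 8-byte table entries it touches. */
    #include <stdio.h>
    #include <stdint.h>

    #define ENTRY_SIZE ((unsigned)sizeof(uint64_t))   /* EPT PTE/PDE/PDPTE/PML4E size */

    int main(void)
    {
        unsigned const off     = 12u;   /* example: misaligned write...      */
        unsigned const cbWrite = 8u;    /* ...that straddles two entries     */

        unsigned const iShw  = off / ENTRY_SIZE;                   /* first entry */
        unsigned const iShw2 = (off + cbWrite - 1u) / ENTRY_SIZE;  /* last entry  */

        printf("first entry touched: %u\n", iShw);
        if ((off & 7u) && (off & 7u) + cbWrite > ENTRY_SIZE && iShw2 != iShw)
            printf("write straddles into entry %u as well\n", iShw2);
        return 0;
    }

With off=12 and cbWrite=8 the write crosses from entry 1 into entry 2, so both shadow entries would be cleared, which is exactly why the handler re-checks a second index.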
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r96407 r96879 238 238 a different shadow paging root/mode in both cases. */ 239 239 RTGCPHYS GCPhysCR3 = (fIs64BitsPagingMode) ? RT_BIT_64(63) : RT_BIT_64(62); 240 PGMPOOLKIND enmKind = PGMPOOLKIND_ROOT_NESTED; 241 # elif defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) 242 RTGCPHYS GCPhysCR3; 243 PGMPOOLKIND enmKind; 244 if (pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_EPT) 245 { 246 GCPhysCR3 = RT_BIT_64(63); NOREF(fIs64BitsPagingMode); 247 enmKind = PGMPOOLKIND_ROOT_NESTED; 248 } 249 else 250 { 251 GCPhysCR3 = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK; 252 enmKind = PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4; 253 } 240 254 # else 241 255 RTGCPHYS GCPhysCR3 = RT_BIT_64(63); NOREF(fIs64BitsPagingMode); 256 PGMPOOLKIND const enmKind = PGMPOOLKIND_ROOT_NESTED; 242 257 # endif 243 258 PPGMPOOLPAGE pNewShwPageCR3; … … 250 265 PGM_LOCK_VOID(pVM); 251 266 252 int rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_ROOT_NESTED, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),267 int rc = pgmPoolAlloc(pVM, GCPhysCR3, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu), 253 268 NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/, 254 269 &pNewShwPageCR3); … … 283 298 284 299 PGM_LOCK_VOID(pVM); 300 301 # if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT 302 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT) 303 pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); 304 # endif 285 305 286 306 /* Do *not* unlock this page as we have two of them floating around in the 32-bit host & 64-bit guest case. … … 372 392 373 393 # elif PGM_SHW_TYPE == PGM_TYPE_EPT 394 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT); 374 395 PEPTPD pPDDst; 375 396 int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst); … … 527 548 528 549 # elif PGM_SHW_TYPE == PGM_TYPE_EPT 550 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT); 529 551 const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK); 530 552 PEPTPD pPDDst; … … 546 568 return VERR_PAGE_TABLE_NOT_PRESENT; 547 569 548 AssertFatal (!SHW_PDE_IS_BIG(Pde));570 AssertFatalMsg(!SHW_PDE_IS_BIG(Pde), ("Pde=%#RX64\n", (uint64_t)Pde.u)); 549 571 550 572 /* … … 569 591 /** @todo Some CSAM code path might end up here and upset 570 592 * the page pool. */ 571 Assert Failed();593 AssertMsgFailed(("NewPte=%#RX64 OrgPte=%#RX64 GCPtr=%#RGv\n", SHW_PTE_LOG64(NewPte), SHW_PTE_LOG64(OrgPte), GCPtr)); 572 594 } 573 595 else if ( SHW_PTE_IS_RW(NewPte)
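The PGMAllShw.h hunk above keys the shadow root page on the nested-guest EPTP (uEptPtr & EPT_EPTP_PG_MASK) when the guest SLAT mode is EPT, instead of the dummy RT_BIT_64(63) key used for plain nested paging. Below is a minimal sketch of that masking step only; it assumes the EPTP page mask covers bits 51:12, and EPTP_PG_MASK plus the example EPTP value are stand-ins, not the VirtualBox definitions.

    /* Sketch: reduce an EPTP value (memory type, walk length and A/D bits in the
     * low bits) to the 4K-aligned guest-physical address of the EPT PML4 table. */
    #include <stdio.h>
    #include <inttypes.h>

    #define EPTP_PG_MASK UINT64_C(0x000ffffffffff000)   /* assumed: bits 51:12 */

    int main(void)
    {
        uint64_t const uEptp      = UINT64_C(0x000000010531001e); /* example EPTP */
        uint64_t const GCPhysPml4 = uEptp & EPTP_PG_MASK;
        printf("EPTP %#018" PRIx64 " -> PML4 at %#" PRIx64 "\n", uEptp, GCPhysPml4);
        return 0;
    }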