Changeset 92426 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Nov 15, 2021 1:25:47 PM (3 years ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 7 edited
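The common thread in this changeset is the guest page-walk API: PGMGstGetPage() and the mode-specific walkers stop returning page flags and the guest-physical address through two out-parameters and instead fill in a single PGMPTWALK structure, with the paging-mode specific details split off into a separate PGMPTWALKGST. The following is a rough before/after sketch of a call site, not a copy of any function in the diffs; pVCpu and GCPtrPage are assumed to be in scope, and the field names (fSucceeded, fEffective, GCPhys) come from the hunks below:

    /* Old interface (pre-r92426): flags and physical address as out-parameters. */
    uint64_t fFlags;
    RTGCPHYS GCPhys;
    int rc = PGMGstGetPage(pVCpu, GCPtrPage, &fFlags, &GCPhys);
    if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
        GCPhys |= GCPtrPage & PAGE_OFFSET_MASK;   /* add the page offset back in */

    /* New interface (r92426): everything is returned in a PGMPTWALK. */
    PGMPTWALK Walk;
    rc = PGMGstGetPage(pVCpu, GCPtrPage, &Walk);
    if (RT_SUCCESS(rc) && (Walk.fEffective & X86_PTE_RW))
    {
        Assert(Walk.fSucceeded);
        RTGCPHYS const GCPhysWithOff = Walk.GCPhys | (GCPtrPage & PAGE_OFFSET_MASK);
        NOREF(GCPhysWithOff);
    }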
Legend (for the per-file diffs below):
- Unmodified context lines are shown without a prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
- "…" marks elided unchanged context
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r91580 → r92426

-        RTGCPHYS    GCPhys;
-        uint64_t    fFlags;
-        int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
-        if (RT_SUCCESS(rc)) { /* probable */ }
+        PGMPTWALK   Walk;
+        int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
+        if (RT_SUCCESS(rc))
+            Assert(Walk.fSucceeded); /* probable. */
         else
         {
             …
             return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
         }
-        if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
+        if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
         else
         {
             …
             return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
         }
-        if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
+        if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
         else
         {
             …
             return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
         }
-        GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
+        RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & PAGE_OFFSET_MASK);
         /** @todo Check reserved bits and such stuff. PGM is better at doing
          *        that, so do it when implementing the guest virtual address
…
             pVCpu->iem.s.CodeTlb.cTlbMisses++;
-            RTGCPHYS    GCPhys;
-            uint64_t    fFlags;
-            int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
+            PGMPTWALK   Walk;
+            int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
             if (RT_FAILURE(rc))
             {
…
             AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
+            Assert(Walk.fSucceeded);
             pTlbe->uTag             = uTag;
-            pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
-            pTlbe->GCPhys           = GCPhys;
+            pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D))
+                                    | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
+            pTlbe->GCPhys           = Walk.GCPhys;
             pTlbe->pbMappingR3      = NULL;
…
     Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */

-    RTGCPHYS    GCPhys;
-    uint64_t    fFlags;
-    int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
+    PGMPTWALK   Walk;
+    int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
     if (RT_FAILURE(rc))
     {
…
-    if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
+    if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
     {
         Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
         return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     }
-    if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
+    if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
     {
         Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
         return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     }
-    GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
+    RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & PAGE_OFFSET_MASK);
     Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
     /** @todo Check reserved bits and such stuff. PGM is better at doing
…
      *        iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
      *        here. */
-    RTGCPHYS    GCPhys;
-    uint64_t    fFlags;
-    int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
+    PGMPTWALK   Walk;
+    int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
     if (RT_FAILURE(rc))
     {
…
     /* If the page is writable and does not have the no-exec bit set, all
        access is allowed.  Otherwise we'll have to check more carefully... */
-    if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
+    if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
     {
         /* Write to read only memory? */
         if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
-            && !(fFlags & X86_PTE_RW)
+            && !(Walk.fEffective & X86_PTE_RW)
             && (   (    pVCpu->iem.s.uCpl == 3
                     && !(fAccess & IEM_ACCESS_WHAT_SYS))
…
         /* Kernel memory accessed by userland? */
-        if (   !(fFlags & X86_PTE_US)
+        if (   !(Walk.fEffective & X86_PTE_US)
             && pVCpu->iem.s.uCpl == 3
             && !(fAccess & IEM_ACCESS_WHAT_SYS))
…
         /* Executing non-executable memory? */
         if (   (fAccess & IEM_ACCESS_TYPE_EXEC)
-            && (fFlags & X86_PTE_PAE_NX)
+            && (Walk.fEffective & X86_PTE_PAE_NX)
             && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
         {
…
     /** @todo testcase: check when A and D bits are actually set by the CPU. */
     uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
-    if ((fFlags & fAccessedDirty) != fAccessedDirty)
+    if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
     {
         int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
…
-    GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
+    RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & PAGE_OFFSET_MASK);
     *pGCPhysMem = GCPhys;
     return VINF_SUCCESS;
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r92344 → r92426

 DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
-static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALKGST pWalk);
+static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
+                          PPGMPTWALKGST pGstWalk);
 #endif
 static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
…
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
  * @param   GCPtr       Guest Context virtual address of the page.
- * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
- * @param   pGCPhys     Where to store the GC physical address of the page.
- *                      This is page aligned. The fact that the
- */
-VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
+ * @param   pWalk       Where to store the page walk information.
+ */
+VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
 {
     VMCPU_ASSERT_EMT(pVCpu);
+    Assert(pWalk);
+    RT_BZERO(pWalk, sizeof(*pWalk));
     uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
     AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
     AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
-    return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pfFlags, pGCPhys);
+    return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
 }
…
  * @param   pWalk       Where to return the walk result. This is valid for some
  *                      error codes as well.
- */
-int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
+ * @param   pGstWalk    The guest mode specific page walk information.
+ */
+int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
 {
     VMCPU_ASSERT_EMT(pVCpu);
…
         case PGMMODE_32_BIT:
-            pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
-            return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
+            return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);

         case PGMMODE_PAE:
         case PGMMODE_PAE_NX:
-            pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
-            return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
+            return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);

         case PGMMODE_AMD64:
         case PGMMODE_AMD64_NX:
-            pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
-            return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
+            return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);

         case PGMMODE_REAL:
         case PGMMODE_PROTECTED:
-            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
             return VERR_PGM_NOT_USED_IN_MODE;
…
         default:
             AssertFailed();
-            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
             return VERR_PGM_NOT_USED_IN_MODE;
     }
…
  * @param   pWalk           Where to return the walk result. This is valid for
  *                          some error codes as well.
+ * @param   pGstWalk        The second-level paging-mode specific walk
+ *                          information.
  */
 static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
-                          PPGMPTWALKGST pWalk)
-{
-    Assert(pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT);
+                          PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
+{
+    Assert(   pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
+           && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
     switch (pVCpu->pgm.s.enmGuestSlatMode)
     {
         case PGMSLAT_EPT:
-            pWalk->enmType = PGMPTWALKGSTTYPE_EPT;
-            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, &pWalk->u.Ept);
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
+            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);

         default:
             AssertFailed();
-            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
+            pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
             return VERR_PGM_NOT_USED_IN_MODE;
     }
…
  *                      the result of this walk.  This is valid for some error
  *                      codes as well.
- */
-int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
+ * @param   pGstWalk    The guest-mode specific walk information.
+ */
+int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
 {
…
-    if (   pWalk->u.Core.fSucceeded
-        && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
-    {
-        Assert(pWalk->u.Core.uLevel == 0);
-        if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
+    if (   pWalk->fSucceeded
+        && GCPtr - pWalk->GCPtr == PAGE_SIZE)
+    {
+        Assert(pWalk->uLevel == 0);
+        if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
         {
             /*
              * AMD64
              */
-            if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
+            if (!pWalk->fGigantPage && !pWalk->fBigPage)
             {
…
-                if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
+                if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
                 {
-                    if (pWalk->u.Amd64.pPte)
+                    if (pGstWalk->u.Amd64.pPte)
                     {
                         X86PTEPAE Pte;
-                        Pte.u = pWalk->u.Amd64.pPte[1].u;
-                        if (   (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
+                        Pte.u = pGstWalk->u.Amd64.pPte[1].u;
+                        if (   (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
                             && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
                         {
-                            pWalk->u.Core.GCPtr  = GCPtr;
-                            pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
-                            pWalk->u.Amd64.Pte.u = Pte.u;
-                            pWalk->u.Amd64.pPte++;
+                            pWalk->GCPtr  = GCPtr;
+                            pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
+                            pGstWalk->u.Amd64.Pte.u = Pte.u;
+                            pGstWalk->u.Amd64.pPte++;
                             return VINF_SUCCESS;
                         }
                     }
                 }
-                else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
+                else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
                 {
                     Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
-                    if (pWalk->u.Amd64.pPde)
+                    if (pGstWalk->u.Amd64.pPde)
                     {
                         X86PDEPAE Pde;
-                        Pde.u = pWalk->u.Amd64.pPde[1].u;
-                        if (   (Pde.u & fPdeSame) == (pWalk->u.Amd64.Pde.u & fPdeSame)
+                        Pde.u = pGstWalk->u.Amd64.pPde[1].u;
+                        if (   (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
                             && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
                         {
                             /* Get the new PTE and check out the first entry. */
                             int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
-                                                               &pWalk->u.Amd64.pPt);
+                                                               &pGstWalk->u.Amd64.pPt);
                             if (RT_SUCCESS(rc))
                             {
-                                pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
+                                pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
                                 X86PTEPAE Pte;
-                                Pte.u = pWalk->u.Amd64.pPte->u;
-                                if (   (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
+                                Pte.u = pGstWalk->u.Amd64.pPte->u;
+                                if (   (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
                                     && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
                                 {
-                                    pWalk->u.Core.GCPtr  = GCPtr;
-                                    pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
-                                    pWalk->u.Amd64.Pte.u = Pte.u;
-                                    pWalk->u.Amd64.Pde.u = Pde.u;
-                                    pWalk->u.Amd64.pPde++;
+                                    pWalk->GCPtr  = GCPtr;
+                                    pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
+                                    pGstWalk->u.Amd64.Pte.u = Pte.u;
+                                    pGstWalk->u.Amd64.Pde.u = Pde.u;
+                                    pGstWalk->u.Amd64.pPde++;
                                     return VINF_SUCCESS;
                                 }
…
-            else if (!pWalk->u.Core.fGigantPage)
+            else if (!pWalk->fGigantPage)
             {
-                if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
+                if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
                 {
-                    pWalk->u.Core.GCPtr   = GCPtr;
-                    pWalk->u.Core.GCPhys += PAGE_SIZE;
+                    pWalk->GCPtr   = GCPtr;
+                    pWalk->GCPhys += PAGE_SIZE;
                     return VINF_SUCCESS;
                 }
…
             else
             {
-                if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
+                if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
                 {
-                    pWalk->u.Core.GCPtr   = GCPtr;
-                    pWalk->u.Core.GCPhys += PAGE_SIZE;
+                    pWalk->GCPtr   = GCPtr;
+                    pWalk->GCPhys += PAGE_SIZE;
                     return VINF_SUCCESS;
                 }
…
     /* Case we don't handle.  Do full walk. */
-    return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
-}
-
-
-/**
- * Checks if the page is present.
- *
- * @returns true if the page is present.
- * @returns false if the page is not present.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   GCPtr       Address within the page.
- */
-VMMDECL(bool) PGMGstIsPagePresent(PVMCPUCC pVCpu, RTGCPTR GCPtr)
-{
-    VMCPU_ASSERT_EMT(pVCpu);
-    int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
-    return RT_SUCCESS(rc);
+    return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
 }
…
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     /* Update the guest SLAT mode if it's a nested-guest. */
-    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
-    {
-        if (PGMMODE_WITH_PAGING(enmGuestMode))
-            pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
-        else
-            pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
-    }
+    if (   CPUMIsGuestVmxEptPagingEnabled(pVCpu)
+        && PGMMODE_WITH_PAGING(enmGuestMode))
+        pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
     else
-        Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
+        pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
 #endif
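The internal walkers follow the same split as the public API: the mode-independent result now travels in a PGMPTWALK while the raw table entries and pointers stay in a PGMPTWALKGST, so callers that need both pass two structures. The following is a hedged sketch of the new calling pattern, not code from the changeset; the names and fields are taken from the hunks above, and error handling is trimmed:

    PGMPTWALK    Walk;      /* common result: GCPtr, GCPhys, fSucceeded, fEffective, ... */
    PGMPTWALKGST GstWalk;   /* mode-specific part: enmType plus the PDE/PTE copies */
    int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &GstWalk);
    if (RT_SUCCESS(rc) && Walk.fSucceeded)
    {
        /* The physical address no longer lives under u.Core. */
        RTGCPHYS const GCPhys = Walk.GCPhys;
        NOREF(GCPhys); NOREF(GstWalk);
    }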
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r92381 → r92426

  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   pGstWalk    The guest page table walk result.
+ * @param   pWalk       The guest page table walk result.
  * @param   uErr        The error code.
  */
-PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PGSTPTWALK pGstWalk, RTGCUINT uErr)
+PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, RTGCUINT uErr)
 {
…
                  ? uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID)
                  : uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
-    if (   pGstWalk->Core.fRsvdError
-        || pGstWalk->Core.fBadPhysAddr)
+    if (   pWalk->fRsvdError
+        || pWalk->fBadPhysAddr)
     {
         uNewErr |= X86_TRAP_PF_RSVD | X86_TRAP_PF_P;
-        Assert(!pGstWalk->Core.fNotPresent);
-    }
-    else if (!pGstWalk->Core.fNotPresent)
+        Assert(!pWalk->fNotPresent);
+    }
+    else if (!pWalk->fNotPresent)
         uNewErr |= X86_TRAP_PF_P;
     TRPMSetErrorCode(pVCpu, uNewErr);

-    LogFlow(("Guest trap; cr2=%RGv uErr=%RGv lvl=%d\n", pGstWalk->Core.GCPtr, uErr, pGstWalk->Core.uLevel));
+    LogFlow(("Guest trap; cr2=%RGv uErr=%RGv lvl=%d\n", pWalk->GCPtr, uErr, pWalk->uLevel));
     STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2GuestTrap; });
     return VINF_EM_RAW_GUEST_TRAP;
…
  * @param   pvFault         The fault address.
  * @param   pPage           The guest page at @a pvFault.
- * @param   pGstWalk        The guest page table walk result.
+ * @param   pWalk           The guest page table walk result.
+ * @param   pGstWalk        The guest paging-mode specific walk information.
  * @param   pfLockTaken     PGM lock taken here or not (out). This is true
  *                          when we're called.
…
                                                 RTGCPTR pvFault, PPGMPAGE pPage, bool *pfLockTaken
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) || defined(DOXYGEN_RUNNING)
+                                                , PPGMPTWALK pWalk
                                                 , PGSTPTWALK pGstWalk
 # endif
…
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    const RTGCPHYS      GCPhysFault = pGstWalk->Core.GCPhys;
+    const RTGCPHYS      GCPhysFault = pWalk->GCPhys;
 # else
     const RTGCPHYS      GCPhysFault = PGM_A20_APPLY(pVCpu, (RTGCPHYS)pvFault);
…
             && pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-            && (pGstWalk->Core.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
-                == PGM_PTATTRS_W_MASK /** @todo Remove pGstWalk->Core.fEffectiveUS and X86_PTE_US further down in the sync code. */
+            && (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
+                == PGM_PTATTRS_W_MASK /** @todo Remove pGstWalk->Core.fEffectiveUS and X86_PTE_US further down in the sync code. */
 # endif
            )
…
     * Walk the guest page translation tables and check if it's a guest fault.
     */
+    PGMPTWALK Walk;
     GSTPTWALK GstWalk;
-    rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &GstWalk);
+    rc = PGM_GST_NAME(Walk)(pVCpu, pvFault, &Walk, &GstWalk);
     if (RT_FAILURE_NP(rc))
-        return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
+        return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &Walk, uErr));

     /* assert some GstWalk sanity. */
…
     /*AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u)); - ditto */
     /*AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); - ditto */
-    Assert(GstWalk.Core.fSucceeded);
+    Assert(Walk.fSucceeded);

     if (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID))
     {
         if (    (   (uErr & X86_TRAP_PF_RW)
-                 && !(GstWalk.Core.fEffective & PGM_PTATTRS_W_MASK)
+                 && !(Walk.fEffective & PGM_PTATTRS_W_MASK)
                  && (   (uErr & X86_TRAP_PF_US)
                      || CPUMIsGuestR0WriteProtEnabled(pVCpu)) )
-             || ((uErr & X86_TRAP_PF_US) && !(GstWalk.Core.fEffective & PGM_PTATTRS_US_MASK))
-             || ((uErr & X86_TRAP_PF_ID) && (GstWalk.Core.fEffective & PGM_PTATTRS_NX_MASK))
+             || ((uErr & X86_TRAP_PF_US) && !(Walk.fEffective & PGM_PTATTRS_US_MASK))
+             || ((uErr & X86_TRAP_PF_ID) && (Walk.fEffective & PGM_PTATTRS_NX_MASK))
            )
-            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
+            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &Walk, uErr));
     }
…
-    if (GstWalk.Core.fBigPage)
+    if (Walk.fBigPage)
     {
         Assert(GstWalk.Pde.u & X86_PDE_PS);
…
         Assert(GstWalk.Pte.u == GstWalk.pPte->u);
     }
+#if 0
+    /* Disabling this since it's not reliable for SMP, see @bugref{10092#c22}. */
     AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u,
               ("%RX64 %RX64 pPte=%p pPde=%p Pte=%RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u, GstWalk.pPte, GstWalk.pPde, (uint64_t)GstWalk.pPte->u));
+#endif
+
 # else  /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
     GSTPDE const    PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A}; /** @todo eliminate this */
…
         PPGMPAGE pPage;
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        rc = pgmPhysGetPageEx(pVM, GstWalk.Core.GCPhys, &pPage);
+        rc = pgmPhysGetPageEx(pVM, Walk.GCPhys, &pPage);
         if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
             return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage,
-                                                                                pfLockTaken, &GstWalk));
+                                                                                pfLockTaken, &Walk, &GstWalk));
         rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
 # else
…
 #ifdef DEBUG_bird
             AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u || pVM->cCpus > 1, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u)); // - triggers with smp w7 guests.
-            AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u || pVM->cCpus > 1, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); // - ditto.
+            AssertMsg(Walk.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u || pVM->cCpus > 1, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u)); // - ditto.
 #endif
…
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    RTGCPHYS GCPhys = GstWalk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
+    RTGCPHYS GCPhys = Walk.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
 # else
     RTGCPHYS GCPhys = PGM_A20_APPLY(pVCpu, (RTGCPHYS)pvFault & ~(RTGCPHYS)PAGE_OFFSET_MASK);
…
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
             return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, pfLockTaken,
-                                                                                 &GstWalk));
+                                                                                 &Walk, &GstWalk));
 # else
…
     /*
      * Check to see if we need to emulate the instruction if CR0.WP=0.
      */
-    if (    !(GstWalk.Core.fEffective & PGM_PTATTRS_W_MASK)
+    if (    !(Walk.fEffective & PGM_PTATTRS_W_MASK)
         &&  (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG
         &&  CPUMGetGuestCPL(pVCpu) < 3)
…
 # if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) && 1
-        if (    (GstWalk.Core.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK
-            &&  (GstWalk.Core.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
+        if (    (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK
+            &&  (Walk.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
             &&  pVM->cCpus == 1 /* Sorry, no go on SMP. Add CFGM option? */)
         {
-            Log(("PGM #PF: Netware WP0+RO+US hack: pvFault=%RGp uErr=%#x (big=%d)\n", pvFault, uErr, GstWalk.Core.fBigPage));
-            rc = pgmShwMakePageSupervisorAndWritable(pVCpu, pvFault, GstWalk.Core.fBigPage, PGM_MK_PG_IS_WRITE_FAULT);
+            Log(("PGM #PF: Netware WP0+RO+US hack: pvFault=%RGp uErr=%#x (big=%d)\n", pvFault, uErr, Walk.fBigPage));
+            rc = pgmShwMakePageSupervisorAndWritable(pVCpu, pvFault, Walk.fBigPage, PGM_MK_PG_IS_WRITE_FAULT);
             if (rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3)
             {
…
         /* Interpret the access. */
         rc = VBOXSTRICTRC_TODO(PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault));
-        Log(("PGM #PF: WP0 emulation (pvFault=%RGp uErr=%#x cpl=%d fBig=%d fEffUs=%d)\n", pvFault, uErr, CPUMGetGuestCPL(pVCpu), GstWalk.Core.fBigPage, !!(GstWalk.Core.fEffective & PGM_PTATTRS_US_MASK)));
+        Log(("PGM #PF: WP0 emulation (pvFault=%RGp uErr=%#x cpl=%d fBig=%d fEffUs=%d)\n", pvFault, uErr, CPUMGetGuestCPL(pVCpu), Walk.fBigPage, !!(Walk.fEffective & PGM_PTATTRS_US_MASK)));
         if (RT_SUCCESS(rc))
             STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eWPEmulInRZ);
…
 # ifdef VBOX_STRICT
-            RTGCPHYS GCPhys2 = RTGCPHYS_MAX;
-            uint64_t fPageGst = UINT64_MAX;
+            PGMPTWALK GstPageWalk;
+            GstPageWalk.GCPhys = RTGCPHYS_MAX;
             if (!pVM->pgm.s.fNestedPaging)
             {
-                rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, &GCPhys2);
-                AssertMsg(RT_SUCCESS(rc) && ((fPageGst & X86_PTE_RW) || ((CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG && CPUMGetGuestCPL(pVCpu) < 3)), ("rc=%Rrc fPageGst=%RX64\n", rc, fPageGst));
-                LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys2, (uint64_t)fPageGst));
+                rc = PGMGstGetPage(pVCpu, pvFault, &GstPageWalk);
+                AssertMsg(RT_SUCCESS(rc) && ((GstPageWalk.fEffective & X86_PTE_RW) || ((CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG && CPUMGetGuestCPL(pVCpu) < 3)), ("rc=%Rrc fPageGst=%RX64\n", rc, GstPageWalk.fEffective));
+                LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GstPageWalk.GCPhys, GstPageWalk.fEffective));
             }
 # if 0 /* Bogus! Triggers incorrectly with w7-64 and later for the SyncPage case: "Pde at %RGv changed behind our back?" */
…
             rc = PGMShwGetPage(pVCpu, pvFault, &fPageShw, NULL);
             AssertMsg((RT_SUCCESS(rc) && (fPageShw & X86_PTE_RW)) || pVM->cCpus > 1 /* new monitor can be installed/page table flushed between the trap exit and PGMTrap0eHandler */,
-                      ("rc=%Rrc fPageShw=%RX64 GCPhys2=%RGp fPageGst=%RX64 pvFault=%RGv\n", rc, fPageShw, GCPhys2, fPageGst, pvFault));
+                      ("rc=%Rrc fPageShw=%RX64 GCPhys2=%RGp fPageGst=%RX64 pvFault=%RGv\n", rc, fPageShw, GstPageWalk.GCPhys, fPageGst, pvFault));
 # endif
 # endif /* VBOX_STRICT */
…
          * mode accesses the page again.
          */
-        else if (   (GstWalk.Core.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK
-                 && (GstWalk.Core.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
+        else if (   (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK)) == PGM_PTATTRS_US_MASK
+                 && (Walk.fBigPage || (GstWalk.Pde.u & X86_PDE_RW))
                  && pVCpu->pgm.s.cNetwareWp0Hacks > 0
                  && (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG
…
         {
             /* Get guest page flags. */
-            uint64_t fPageGst;
-            int rc2 = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL);
+            PGMPTWALK GstPageWalk;
+            int rc2 = PGMGstGetPage(pVCpu, pvFault, &GstPageWalk);
             if (RT_SUCCESS(rc2))
             {
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r92336 → r92426

     || PGM_GST_TYPE == PGM_TYPE_PAE \
     || PGM_GST_TYPE == PGM_TYPE_AMD64
-DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
+DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk);
 #endif
-PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
+PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
 PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
…
-DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
+DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
 {
     NOREF(iLevel); NOREF(pVCpu);
-    pWalk->Core.fNotPresent = true;
-    pWalk->Core.uLevel      = (uint8_t)iLevel;
+    pWalk->fNotPresent = true;
+    pWalk->uLevel      = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }

-DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel, int rc)
+DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc)
 {
     AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
-    pWalk->Core.fBadPhysAddr = true;
-    pWalk->Core.uLevel       = (uint8_t)iLevel;
+    pWalk->fBadPhysAddr = true;
+    pWalk->uLevel       = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }

-DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
+DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
 {
     NOREF(pVCpu);
-    pWalk->Core.fRsvdError = true;
-    pWalk->Core.uLevel     = (uint8_t)iLevel;
+    pWalk->fRsvdError = true;
+    pWalk->uLevel     = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }
…
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
  * @param   GCPtr       The guest virtual address to walk by.
- * @param   pWalk       Where to return the walk result. This is always set.
- */
-DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
+ * @param   pWalk       The common page walk information.
+ * @param   pGstWalk    The guest mode specific page walk information.
+ *
+ * @warning Callers must initialize @a pWalk and @a pGstWalk before calling this
+ *          function.
+ */
+DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
 {
     int rc;

 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+/** @def PGM_GST_SLAT_WALK
+ * Macro to perform guest second-level address translation (EPT or Nested).
+ *
+ * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
+ * @param   a_GCPtrNested   The nested-guest linear address that caused the
+ *                          second-level translation.
+ * @param   a_GCPhysNested  The nested-guest physical address to translate.
+ * @param   a_GCPhysOut     Where to store the guest-physical address (result).
+ */
 # define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
     do { \
         if ((a_pVCpu)->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT) \
         { \
-            PGMPTWALKGST SlatWalk; \
-            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk); \
+            PGMPTWALK    SlatWalk; \
+            PGMPTWALKGST SlatGstWalk; \
+            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk, \
+                                           &SlatGstWalk); \
             if (RT_SUCCESS(rcX)) \
-                (a_GCPhysOut) = SlatWalk.u.Core.GCPhys; \
+                (a_GCPhysOut) = SlatWalk.GCPhys; \
             else \
             { \
-                (a_pWalk)->Core = SlatWalk.u.Core; \
+                *(a_pWalk) = SlatWalk; \
                 return rcX; \
             } \
…
     /*
-     * Init the walking structure.
+     * Init the walking structures.
      */
     RT_ZERO(*pWalk);
-    pWalk->Core.GCPtr = GCPtr;
+    RT_ZERO(*pGstWalk);
+    pWalk->GCPtr = GCPtr;
…
          * The PML4 table.
          */
-        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4);
+        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

         PX86PML4E pPml4e;
-        pWalk->pPml4e  = pPml4e  = &pWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
+        pGstWalk->pPml4e  = pPml4e  = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
         X86PML4E  Pml4e;
-        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
+        pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;
…
         else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);

-        pWalk->Core.fEffective = fEffective = Pml4e.u & (  X86_PML4E_P   | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT
-                                                         | X86_PML4E_PCD | X86_PML4E_A  | X86_PML4E_NX);
+        pWalk->fEffective = fEffective = Pml4e.u & (  X86_PML4E_P   | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT
+                                                    | X86_PML4E_PCD | X86_PML4E_A  | X86_PML4E_NX);
…
         PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
 #endif
-        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pWalk->pPdpt);
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);

 # elif PGM_GST_TYPE == PGM_TYPE_PAE
-    rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pWalk->pPdpt);
+    rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt);
     if (RT_SUCCESS(rc)) { /* probable */ }
     else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
…
 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
     PX86PDPE pPdpe;
-    pWalk->pPdpe  = pPdpe  = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
+    pGstWalk->pPdpe  = pPdpe  = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
     X86PDPE  Pdpe;
-    pWalk->Pdpe.u = Pdpe.u = pPdpe->u;
+    pGstWalk->Pdpe.u = Pdpe.u = pPdpe->u;
…
 # if PGM_GST_TYPE == PGM_TYPE_AMD64
-    pWalk->Core.fEffective = fEffective &= (Pdpe.u & (  X86_PDPE_P   | X86_PDPE_RW  | X86_PDPE_US
-                                                      | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A))
-                                         | (Pdpe.u & X86_PDPE_LM_NX);
+    pWalk->fEffective = fEffective &= (Pdpe.u & (  X86_PDPE_P   | X86_PDPE_RW  | X86_PDPE_US
+                                                 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A))
+                                    | (Pdpe.u & X86_PDPE_LM_NX);
 # else
     /* NX in the legacy-mode PAE PDPE is reserved. The valid check above ensures the NX bit is not set. */
-    pWalk->Core.fEffective = fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
-                                        | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
+    pWalk->fEffective = fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
+                                   | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
 # endif
…
     PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
 # endif
-    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pWalk->pPd);
+    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
     if (RT_SUCCESS(rc)) { /* probable */ }
     else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);

 # elif PGM_GST_TYPE == PGM_TYPE_32BIT
-    rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd);
+    rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd);
     if (RT_SUCCESS(rc)) { /* probable */ }
     else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
…
     {
         PGSTPDE pPde;
-        pWalk->pPde  = pPde  = &pWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
+        pGstWalk->pPde  = pPde  = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
         GSTPDE  Pde;
-        pWalk->Pde.u = Pde.u = pPde->u;
+        pGstWalk->Pde.u = Pde.u = pPde->u;
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
…
             fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
             fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
-            pWalk->Core.fEffective = fEffective;
+            pWalk->fEffective = fEffective;
             Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
             Assert(fEffective & PGM_PTATTRS_R_MASK);

-            pWalk->Core.fBigPage   = true;
-            pWalk->Core.fSucceeded = true;
+            pWalk->fBigPage   = true;
+            pWalk->fSucceeded = true;
             RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                                | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
…
             PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk);
 # endif
-            pWalk->Core.GCPhys = GCPhysPde;
-            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
+            pWalk->GCPhys = GCPhysPde;
+            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
             return VINF_SUCCESS;
         }
…
             return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-        pWalk->Core.fEffective = fEffective = Pde.u & (  X86_PDE_P   | X86_PDE_RW  | X86_PDE_US
-                                                       | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
+        pWalk->fEffective = fEffective = Pde.u & (  X86_PDE_P   | X86_PDE_RW  | X86_PDE_US
+                                                  | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
 # else
-        pWalk->Core.fEffective = fEffective &= (Pde.u & (  X86_PDE_P   | X86_PDE_RW  | X86_PDE_US
-                                                         | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A))
-                                             | (Pde.u & X86_PDE_PAE_NX);
+        pWalk->fEffective = fEffective &= (Pde.u & (  X86_PDE_P   | X86_PDE_RW  | X86_PDE_US
+                                                    | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A))
+                                        | (Pde.u & X86_PDE_PAE_NX);
 # endif
…
         PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk);
 # endif
-        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pWalk->pPt);
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
     }
     {
         PGSTPTE pPte;
-        pWalk->pPte  = pPte  = &pWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
+        pGstWalk->pPte  = pPte  = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
         GSTPTE  Pte;
-        pWalk->Pte.u = Pte.u = pPte->u;
+        pGstWalk->Pte.u = Pte.u = pPte->u;

         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
…
         fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
-        pWalk->Core.fEffective = fEffective;
+        pWalk->fEffective = fEffective;
         Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
         Assert(fEffective & PGM_PTATTRS_R_MASK);

-        pWalk->Core.fSucceeded = true;
+        pWalk->fSucceeded = true;
         RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte)
                            | (GCPtr & PAGE_OFFSET_MASK);
…
         PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk);
 # endif
-        pWalk->Core.GCPhys = GCPhysPte;
+        pWalk->GCPhys = GCPhysPte;
         return VINF_SUCCESS;
     }
…
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   GCPtr       Guest Context virtual address of the page.
- * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
- * @param   pGCPhys     Where to store the GC physical address of the page.
- *                      This is page aligned!
- */
-PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
+ * @param   pWalk       Where to store the page walk info.
+ */
+PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
 {
 #if PGM_GST_TYPE == PGM_TYPE_REAL \
…
      * Fake it.
      */
-    if (pfFlags)
-        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
-    if (pGCPhys)
-        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
+    RT_ZERO(*pWalk);
+    pWalk->fSucceeded = true;
+    pWalk->GCPtr      = GCPtr;
+    pWalk->GCPhys     = GCPtr & PAGE_BASE_GC_MASK;
+    pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
+    pWalk->GCPhys     = GCPtr & PAGE_BASE_GC_MASK;
     NOREF(pVCpu);
     return VINF_SUCCESS;

 #elif PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64

-    GSTPTWALK Walk;
-    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
+    PGMPTWALK Walk;
+    GSTPTWALK GstWalk;
+    RT_ZERO(Walk);
+    RT_ZERO(GstWalk);
+    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
     if (RT_FAILURE(rc))
         return rc;

-    if (pGCPhys)
-        *pGCPhys = Walk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
-
-    if (pfFlags)
-    {
-        if (!Walk.Core.fBigPage)
-            *pfFlags = (Walk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */
-                     | (Walk.Core.fEffective & ( PGM_PTATTRS_W_MASK
-                                               | PGM_PTATTRS_US_MASK))
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-                     | (Walk.Core.fEffective & PGM_PTATTRS_NX_MASK)
-# endif
-                     ;
-        else
-        {
-            *pfFlags = (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */
-                     | (Walk.Core.fEffective & ( PGM_PTATTRS_W_MASK
-                                               | PGM_PTATTRS_US_MASK
-                                               | PGM_PTATTRS_PAT_MASK))
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-                     | (Walk.Core.fEffective & PGM_PTATTRS_NX_MASK)
-# endif
-                     ;
-        }
-    }
-
+    uint64_t fFlags;
+    if (!Walk.fBigPage)
+        fFlags = (GstWalk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */
+               | (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
+# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
+               | (Walk.fEffective & PGM_PTATTRS_NX_MASK)
+# endif
+               ;
+    else
+    {
+        fFlags = (GstWalk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */
+               | (Walk.fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK | PGM_PTATTRS_PAT_MASK))
+# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
+               | (Walk.fEffective & PGM_PTATTRS_NX_MASK)
+# endif
+               ;
+    }
+
+    pWalk->fSucceeded = true;
+    pWalk->GCPtr      = GCPtr;
+    pWalk->GCPhys     = Walk.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
+    pWalk->fEffective = fFlags;
     return VINF_SUCCESS;
…
     for (;;)
     {
-        GSTPTWALK Walk;
-        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
+        PGMPTWALK Walk;
+        GSTPTWALK GstWalk;
+        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
         if (RT_FAILURE(rc))
             return rc;

-        if (!Walk.Core.fBigPage)
+        if (!Walk.fBigPage)
         {
…
             unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
-            while (iPTE < RT_ELEMENTS(Walk.pPt->a))
+            while (iPTE < RT_ELEMENTS(GstWalk.pPt->a))
             {
-                GSTPTE Pte = Walk.pPt->a[iPTE];
+                GSTPTE Pte = GstWalk.pPt->a[iPTE];
                 Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                       | (fFlags & ~GST_PTE_PG_MASK);
-                Walk.pPt->a[iPTE] = Pte;
+                GstWalk.pPt->a[iPTE] = Pte;

                 /* next page */
…
             GSTPDE PdeNew;
 # if PGM_GST_TYPE == PGM_TYPE_32BIT
-            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
+            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
 # else
-            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
+            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
 # endif
                      | (fFlags & ~GST_PTE_PG_MASK)
                      | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
-            *Walk.pPde = PdeNew;
+            *GstWalk.pPde = PdeNew;

             /* advance */
trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h
r92336 → r92426

 #if PGM_GST_TYPE == PGM_TYPE_EPT
-DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
+DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
 {
     NOREF(pVCpu);
-    pWalk->Core.fNotPresent = true;
-    pWalk->Core.uLevel      = (uint8_t)iLevel;
+    pWalk->fNotPresent = true;
+    pWalk->uLevel      = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }

-DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel, int rc)
+DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc)
 {
     AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
-    pWalk->Core.fBadPhysAddr = true;
-    pWalk->Core.uLevel       = (uint8_t)iLevel;
+    pWalk->fBadPhysAddr = true;
+    pWalk->uLevel       = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }

-DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
+DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
 {
     NOREF(pVCpu);
-    pWalk->Core.fRsvdError = true;
-    pWalk->Core.uLevel     = (uint8_t)iLevel;
+    pWalk->fRsvdError = true;
+    pWalk->uLevel     = (uint8_t)iLevel;
     return VERR_PAGE_TABLE_NOT_PRESENT;
 }

 DECLINLINE(int) PGM_GST_SLAT_NAME_EPT(Walk)(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
-                                            PGSTPTWALK pWalk)
-{
+                                            PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
+{
+    /** @todo implement figuring out fEptMisconfig. */
     /*
-     * Init walk structure.
+     * Init walk structures.
      */
-    int rc;
     RT_ZERO(*pWalk);
-    pWalk->Core.GCPtr              = GCPtrNested;
-    pWalk->Core.GCPhysNested       = GCPhysNested;
-    pWalk->Core.fIsSlat            = true;
-    pWalk->Core.fIsLinearAddrValid = fIsLinearAddrValid;
+    RT_ZERO(*pGstWalk);
+
+    pWalk->GCPtr              = GCPtrNested;
+    pWalk->GCPhysNested       = GCPhysNested;
+    pWalk->fIsLinearAddrValid = fIsLinearAddrValid;
+    pWalk->fIsSlat            = true;
…
     uint64_t fEffective;
     {
-        rc = pgmGstGetEptPML4PtrEx(pVCpu, &pWalk->pPml4);
+        int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pGstWalk->pPml4);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

         PEPTPML4E pPml4e;
-        pWalk->pPml4e  = pPml4e  = &pWalk->pPml4->a[(GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
+        pGstWalk->pPml4e  = pPml4e  = &pGstWalk->pPml4->a[(GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
         EPTPML4E  Pml4e;
-        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
+        pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;

         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
…
                    | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                    | fEffectiveEpt;
-        pWalk->Core.fEffective = fEffective;
-
-        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pWalk->pPdpt);
+        pWalk->fEffective = fEffective;
+
+        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pGstWalk->pPdpt);
         if (RT_SUCCESS(rc)) { /* probable */ }
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
     }
     {
         PEPTPDPTE pPdpte;
-        pWalk->pPdpte  = pPdpte  = &pWalk->pPdpt->a[(GCPhysNested >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
+        pGstWalk->pPdpte  = pPdpte  = &pGstWalk->pPdpt->a[(GCPhysNested >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
         EPTPDPTE  Pdpte;
-        pWalk->Pdpte.u = Pdpte.u = pPdpte->u;
+        pGstWalk->Pdpte.u = Pdpte.u = pPdpte->u;

         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ }
…
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | (fEffectiveEpt & fCumulativeEpt);
-            pWalk->Core.fEffective = fEffective;
+            pWalk->fEffective = fEffective;
         }
         else if (GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte))
…
             fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty)
                        |  RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
-            pWalk->Core.fEffective = fEffective;
-
-            pWalk->Core.fGigantPage = true;
-            pWalk->Core.fSucceeded  = true;
-            pWalk->Core.GCPhys      = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)
-                                    | (GCPhysNested & GST_GIGANT_PAGE_OFFSET_MASK);
-            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
+            pWalk->fEffective = fEffective;
+
+            pWalk->fGigantPage = true;
+            pWalk->fSucceeded  = true;
+            pWalk->GCPhys      = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)
+                               | (GCPhysNested & GST_GIGANT_PAGE_OFFSET_MASK);
+            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
             return VINF_SUCCESS;
         }
…
     {
         PGSTPDE pPde;
-        pWalk->pPde  = pPde  = &pWalk->pPd->a[(GCPhysNested >> GST_PD_SHIFT) & GST_PD_MASK];
+        pGstWalk->pPde  = pPde  = &pGstWalk->pPd->a[(GCPhysNested >> GST_PD_SHIFT) & GST_PD_MASK];
         GSTPDE Pde;
-        pWalk->Pde.u = Pde.u = pPde->u;
+        pGstWalk->Pde.u = Pde.u = pPde->u;
+
         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
         else return PGM_GST_SLAT_NAME_EPT(WalkReturnNotPresent)(pVCpu, pWalk, 2);
+
         if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
         {
…
             fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty)
                        |  RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
-            pWalk->Core.fEffective = fEffective;
-
-            pWalk->Core.fBigPage   = true;
-            pWalk->Core.fSucceeded = true;
-            pWalk->Core.GCPhys     = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
-                                   | (GCPhysNested & GST_BIG_PAGE_OFFSET_MASK);
-            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
+            pWalk->fEffective = fEffective;
+
+            pWalk->fBigPage   = true;
+            pWalk->fSucceeded = true;
+            pWalk->GCPhys     = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
+                              | (GCPhysNested & GST_BIG_PAGE_OFFSET_MASK);
+            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
             return VINF_SUCCESS;
         }
…
                        | RT_BF_MAKE(PGM_PTATTRS_A, fAccessed)
                        | (fEffectiveEpt & fCumulativeEpt);
-            pWalk->Core.fEffective = fEffective;
-
-            rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt);
+            pWalk->fEffective = fEffective;
+
+            int const rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pGstWalk->pPt);
             if (RT_SUCCESS(rc)) { /* probable */ }
             else return PGM_GST_SLAT_NAME_EPT(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
     }
     {
         PGSTPTE pPte;
-        pWalk->pPte  = pPte  = &pWalk->pPt->a[(GCPhysNested >> GST_PT_SHIFT) & GST_PT_MASK];
+        pGstWalk->pPte  = pPte  = &pGstWalk->pPt->a[(GCPhysNested >> GST_PT_SHIFT) & GST_PT_MASK];
         GSTPTE Pte;
-        pWalk->Pte.u = Pte.u = pPte->u;
+        pGstWalk->Pte.u = Pte.u = pPte->u;

         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
…
         fEffective |= RT_BF_MAKE(PGM_PTATTRS_D, fDirty)
                    |  RT_BF_MAKE(PGM_PTATTRS_EPT_MEMTYPE, fMemType);
-        pWalk->Core.fEffective = fEffective;
-
-        pWalk->Core.fSucceeded = true;
-        pWalk->Core.GCPhys     = GST_GET_PTE_GCPHYS(Pte)
-                               | (GCPhysNested & PAGE_OFFSET_MASK);
+        pWalk->fEffective = fEffective;
+
+        pWalk->fSucceeded = true;
+        pWalk->GCPhys     = GST_GET_PTE_GCPHYS(Pte) | (GCPhysNested & PAGE_OFFSET_MASK);
         return VINF_SUCCESS;
     }
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r92391 → r92426

 VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
 {
-    int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
+    PGMPTWALK Walk;
+    int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
     if (pGCPhys && RT_SUCCESS(rc))
-        *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
+        *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK);
     return rc;
 }
…
 VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
 {
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    RTGCPHYS GCPhys;
-    int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
+    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
+    PGMPTWALK Walk;
+    int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
     if (RT_SUCCESS(rc))
-        rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
+        rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
     return rc;
 }
…
 VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
 {
-    RTGCPHYS    GCPhys;
-    uint64_t    fFlags;
     int         rc;
     PVMCC       pVM = pVCpu->CTX_SUFF(pVM);
…
     {
         /* Convert virtual to physical address + flags */
-        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
+        PGMPTWALK Walk;
+        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
         AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
-        GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
+        RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);

         /* mark the guest page as accessed. */
-        if (!(fFlags & X86_PTE_A))
+        if (!(Walk.fEffective & X86_PTE_A))
         {
             rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
…
     {
         /* Convert virtual to physical address + flags */
-        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
+        PGMPTWALK Walk;
+        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
         AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
-        GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
+        RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);

         /* mark the guest page as accessed. */
-        if (!(fFlags & X86_PTE_A))
+        if (!(Walk.fEffective & X86_PTE_A))
         {
             rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
…
 VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
 {
-    RTGCPHYS    GCPhys;
-    uint64_t    fFlags;
     int         rc;
     PVMCC       pVM = pVCpu->CTX_SUFF(pVM);
…
     {
         /* Convert virtual to physical address + flags */
-        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
+        PGMPTWALK Walk;
+        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
         AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
-        GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
+        RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);

         /* Mention when we ignore X86_PTE_RW... */
-        if (!(fFlags & X86_PTE_RW))
+        if (!(Walk.fEffective & X86_PTE_RW))
             Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));

         /* Mark the guest page as accessed and dirty if necessary. */
-        if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
+        if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
         {
             rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
…
     {
         /* Convert virtual to physical address + flags */
-        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
+        PGMPTWALK Walk;
+        rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
         AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
-        GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
+        RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);

         /* Mention when we ignore X86_PTE_RW... */
-        if (!(fFlags & X86_PTE_RW))
+        if (!(Walk.fEffective & X86_PTE_RW))
             Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));

         /* Mark the guest page as accessed and dirty if necessary. */
-        if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
+        if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
         {
             rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
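Note that the public converters such as PGMPhysGCPtr2GCPhys() and PGMPhysGCPtr2HCPhys() keep their signatures; only their internals switch to the walk structure. A minimal usage sketch (hypothetical caller, shown only to illustrate that external call sites are unaffected):

    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPtr=%RGv -> GCPhys=%RGp\n", GCPtr, GCPhys));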
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r91854 → r92426

              *       set instead of resolving the guest physical
              *       address yet again. */
-            RTGCPHYS GCPhys;
-            uint64_t fGstPte;
-            rc = PGMGstGetPage(pVCpu, GCPtr, &fGstPte, &GCPhys);
+            PGMPTWALK GstWalk;
+            rc = PGMGstGetPage(pVCpu, GCPtr, &GstWalk);
             AssertRC(rc);
             if (RT_SUCCESS(rc))
             {
-                Assert((fGstPte & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));
-                PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
+                Assert((GstWalk.fEffective & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));
+                PPGMPAGE pPage = pgmPhysGetPage(pVM, GstWalk.GCPhys);
                 Assert(pPage);
                 if (pPage)
                 {
-                    rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
+                    rc = pgmPhysPageMakeWritable(pVM, pPage, GstWalk.GCPhys);
                     AssertRCReturn(rc, rc);
-                    Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GCPhys, pPage));
+                    Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GstWalk.GCPhys, pPage));
                 }
             }