Changeset 81637 in vbox

- Timestamp: Nov 4, 2019, 4:22:10 AM
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(changes from r81579 to r81637)

A new helper type, VMXPAGEALLOCINFO, is added after the VMXTRANSIENT typedefs. It describes one page that may need to be allocated for a VT-x structure:

    /**
     * VMX page allocation information.
     */
    typedef struct
    {
        uint32_t    fValid;       /**< Whether to allocate this page (e.g. based on a CPU feature). */
        uint32_t    uPadding0;    /**< Padding to ensure arrays of this struct are aligned to a multiple of 8. */
        PRTHCPHYS   pHCPhys;      /**< Where to store the host-physical address of the allocation. */
        PRTR0PTR    ppVirt;       /**< Where to store the host-virtual address of the allocation. */
    } VMXPAGEALLOCINFO;
    /** Pointer to VMX page-allocation info. */
    typedef VMXPAGEALLOCINFO *PVMXPAGEALLOCINFO;
    /** Pointer to a const VMX page-allocation info. */
    typedef const VMXPAGEALLOCINFO *PCVMXPAGEALLOCINFO;
    AssertCompileSizeAlignment(VMXPAGEALLOCINFO, 8);

hmR0VmxPageAllocZ(), which allocated, mapped and zeroed one physically contiguous page per call (via RTR0MemObjAllocCont), is replaced by hmR0VmxPagesAllocZ(), which allocates all requested pages in a single ring-0 memory object:

    /**
     * Allocates pages specified by an array of VMX page-allocation info objects.
     *
     * The page contents are zeroed after allocation.
     *
     * @returns VBox status code.
     * @param   hMemObj         The ring-0 memory object associated with the allocation.
     * @param   paAllocInfo     Pointer to the first element of the VMX page-allocation
     *                          info object array.
     * @param   cEntries        The number of elements in the @a paAllocInfo array.
     */
    static int hmR0VmxPagesAllocZ(RTR0MEMOBJ hMemObj, PVMXPAGEALLOCINFO paAllocInfo, uint32_t cEntries)
    {
        /* Figure out how many pages to allocate. */
        uint32_t cPages = 0;
        for (uint32_t iPage = 0; iPage < cEntries; iPage++)
            cPages += !!paAllocInfo[iPage].fValid;

        /* Allocate the pages. */
        if (cPages)
        {
            size_t const cbPages = cPages << X86_PAGE_4K_SHIFT;
            int rc = RTR0MemObjAllocPage(&hMemObj, cbPages, false /* fExecutable */);
            if (RT_FAILURE(rc))
                return rc;

            /* Zero the contents and assign each page to the corresponding VMX page-allocation entry. */
            void *pvFirstPage = RTR0MemObjAddress(hMemObj);
            ASMMemZero32(pvFirstPage, cbPages);

            uint32_t iPage = 0;
            for (uint32_t i = 0; i < cEntries; i++)
                if (paAllocInfo[i].fValid)
                {
                    RTHCPHYS const HCPhysPage = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
                    void          *pvPage     = (void *)((uintptr_t)pvFirstPage + (iPage << X86_PAGE_4K_SHIFT));
                    Assert(HCPhysPage && HCPhysPage != NIL_RTHCPHYS);
                    AssertPtr(pvPage);

                    Assert(paAllocInfo[iPage].pHCPhys);
                    Assert(paAllocInfo[iPage].ppVirt);
                    *paAllocInfo[iPage].pHCPhys = HCPhysPage;
                    *paAllocInfo[iPage].ppVirt  = pvPage;

                    /* Move to next page. */
                    ++iPage;
                }

            /* Make sure all valid (requested) pages have been assigned. */
            Assert(iPage == cPages);
        }
        return VINF_SUCCESS;
    }

Likewise, the per-page hmR0VmxPageFree(), which freed one page and reset the caller's virtual and physical addresses, becomes hmR0VmxPagesFree():

    /**
     * Frees pages allocated using hmR0VmxPagesAllocZ.
     *
     * @param   hMemObj     The ring-0 memory object associated with the allocation.
     */
    DECL_FORCE_INLINE(void) hmR0VmxPagesFree(RTR0MEMOBJ hMemObj)
    {
        /* We can clean up wholesale since it's all one allocation. */
        RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    }

hmR0VmxInitVmcsInfo() is renamed to hmR0VmxVmcsInfoInit(); its six per-page memory-object assertions (hMemObjVmcs, hMemObjShadowVmcs, hMemObjMsrBitmap, hMemObjGuestMsrLoad, hMemObjGuestMsrStore, hMemObjHostMsrLoad) collapse into a single Assert(pVmcsInfo->hMemObj == NIL_RTR0MEMOBJ).

hmR0VmxFreeVmcsInfo() is renamed to hmR0VmxVmcsInfoFree(), drops its pVM parameter and the individual hmR0VmxPageFree() calls, and simply releases the single memory object:

    static void hmR0VmxVmcsInfoFree(PVMXVMCSINFO pVmcsInfo)
    {
        if (pVmcsInfo->hMemObj != NIL_RTR0MEMOBJ)
        {
            hmR0VmxPagesFree(pVmcsInfo->hMemObj);
            hmR0VmxVmcsInfoInit(pVmcsInfo);
        }
    }

hmR0VmxAllocVmcsInfo() now builds a VMXPAGEALLOCINFO table covering the VMCS, the guest MSR-load area, the host MSR-load area and, when supported, the MSR bitmap and the shadow VMCS, and allocates them with one hmR0VmxPagesAllocZ() call. A new remark documents that the caller is expected to take care of any and all allocation failures; the function performs no cleanup for failures half-way through. As before, the VM-exit MSR-store area shares the VM-entry MSR-load page, since both contain a symmetric list of guest MSRs to load on VM-entry and store on VM-exit, and the virtual-APIC page is obtained from the APIC device (guest VMCS) or from CPUM (nested-guest VMCS) rather than being allocated again. The core of the new version:

    bool const fMsrBitmaps = RT_BOOL(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS);
    bool const fShadowVmcs = !fIsNstGstVmcs ? pVM->hm.s.vmx.fUseVmcsShadowing : pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing;
    Assert(!pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing);   /* VMCS shadowing is not yet exposed to the guest. */
    VMXPAGEALLOCINFO aAllocInfo[] = {
        { true,        0 /* Unused */, &pVmcsInfo->HCPhysVmcs,         &pVmcsInfo->pvVmcs         },
        { true,        0 /* Unused */, &pVmcsInfo->HCPhysGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad },
        { true,        0 /* Unused */, &pVmcsInfo->HCPhysHostMsrLoad,  &pVmcsInfo->pvHostMsrLoad  },
        { fMsrBitmaps, 0 /* Unused */, &pVmcsInfo->HCPhysMsrBitmap,    &pVmcsInfo->pvMsrBitmap    },
        { fShadowVmcs, 0 /* Unused */, &pVmcsInfo->HCPhysShadowVmcs,   &pVmcsInfo->pvShadowVmcs   },
    };

    int rc = hmR0VmxPagesAllocZ(pVmcsInfo->hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    if (RT_FAILURE(rc))
        return rc;

hmR0VmxStructsFree() shrinks accordingly: the per-VM pages (crash-dump scratch, APIC-access page, VMREAD and VMWRITE bitmaps) are released with a single hmR0VmxPagesFree(pVM->hm.s.vmx.hMemObj); only the RTMemFree() calls for the shadow-VMCS field arrays remain in the VMCS-shadowing branch; and each VCPU's VmcsInfo (and, when VMX is exposed to the guest, VmcsInfoNstGst) is torn down directly via hmR0VmxVmcsInfoFree().

hmR0VmxStructsAlloc() gets the same treatment. Its up-front member initialization moves into a new function, hmR0VmxStructsInit(), which asserts that pbApicAccess (and, with VBOX_WITH_CRASHDUMP_MAGIC, pbScratch) is still NULL, sets the per-VM HCPhys fields to NIL_RTHCPHYS and calls hmR0VmxVmcsInfoInit() on both per-VCPU VMCS-info objects. The allocator itself now carries a remark that it cleans up on memory allocation failures, describes the per-VM pages with a VMXPAGEALLOCINFO table and funnels every error path through a cleanup label that calls hmR0VmxStructsFree():

    /*
     * Allocate per-VM VT-x structures.
     */
    bool const fVirtApicAccess   = RT_BOOL(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
    bool const fUseVmcsShadowing = pVM->hm.s.vmx.fUseVmcsShadowing;
    VMXPAGEALLOCINFO aAllocInfo[] = {
        { fVirtApicAccess,   0 /* Unused */, &pVM->hm.s.vmx.HCPhysApicAccess,    (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess },
        { fUseVmcsShadowing, 0 /* Unused */, &pVM->hm.s.vmx.HCPhysVmreadBitmap,  &pVM->hm.s.vmx.pvVmreadBitmap         },
        { fUseVmcsShadowing, 0 /* Unused */, &pVM->hm.s.vmx.HCPhysVmwriteBitmap, &pVM->hm.s.vmx.pvVmwriteBitmap        },
    #ifdef VBOX_WITH_CRASHDUMP_MAGIC
        { true,              0 /* Unused */, &pVM->hm.s.vmx.HCPhysScratch,       &(PRTR0PTR)pVM->hm.s.vmx.pbScratch    },
    #endif
    };

    int rc = hmR0VmxPagesAllocZ(pVM->hm.s.vmx.hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    if (RT_FAILURE(rc))
        goto cleanup;

The shadow-VMCS field arrays are still allocated with RTMemAllocZ(), but a failure now also branches to the cleanup label, as do failures from the per-VCPU hmR0VmxAllocVmcsInfo() calls for the guest VMCS and, when the VMX feature is exposed to the guest, the nested-guest VMCS. The function ends with:

        return VINF_SUCCESS;

    cleanup:
        hmR0VmxStructsFree(pVM);
        Assert(rc != VINF_SUCCESS);
        return rc;

Two comment typos in the shadow-VMCS field handling are fixed ("...error while syncing the shadow VMCS" gains its full stop, and "ensure guest attempts to acceses to any VMCS fields cause VM-exits" becomes "ensure guest attempts to access any VMCS fields cause VM-exits"). Because the allocator no longer pre-fills the MSR bitmap, the MSR-bitmap setup code now does it: it asserts pvMsrBitmap and fills the page with UINT32_C(0xffffffff) so that, by default, guest access to any MSR causes a VM-exit, to be relaxed later for specific MSRs; a note explains that nested-guest bitmaps need no such initialization here because the entire bitmap is merged in hmR0VmxMergeMsrBitmapNested() before executing the nested-guest. Finally, VMXR0InitVM() calls hmR0VmxStructsInit(pVM) before hmR0VmxStructsAlloc(pVM), and with VBOX_WITH_CRASHDUMP_MAGIC the crash-dump scratch page is now set up here ("SCRATCH Magic" plus the UINT64_C(0xdeadbeefdeadbeef) marker) after the structures have been allocated.
trunk/src/VBox/VMM/include/HMInternal.h
(changes from r81578 to r81637)

In struct HM, fNestedPaging now precedes fInitialized, and the per-VM VMX ring-0 memory objects disappear: hMemObjApicAccess, hMemObjVmreadBitmap, hMemObjVmwriteBitmap and (with VBOX_WITH_CRASHDUMP_MAGIC) hMemObjScratch are all replaced by a single handle at the end of the vmx sub-structure:

    /** Ring-0 memory object for per-VM VMX structures. */
    RTR0MEMOBJ                  hMemObj;

The remaining vmx members are regrouped: the shadow-VMCS field arrays and their counts (paShadowVmcsRoFields, paShadowVmcsFields, cShadowVmcsRoFields, cShadowVmcsFields) move up next to the VMREAD/VMWRITE bitmap pointers, while the host-physical addresses (HCPhysApicAccess, HCPhysVmreadBitmap, HCPhysVmwriteBitmap and HCPhysScratch), pbScratch, pRealModeTSS and pNonPagingModeEPTPageTable move towards the end. New compile-time checks follow the HM typedefs:

    AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
    AssertCompileMemberAlignment(HM, vmx, 8);
    AssertCompileMemberAlignment(HM, svm, 8);

VMXVMCSINFO loses its six per-page memory objects (hMemObjVmcs, hMemObjShadowVmcs, hMemObjMsrBitmap, hMemObjGuestMsrLoad, hMemObjGuestMsrStore, hMemObjHostMsrLoad) in favour of a single "R0-memory object for VMCS and related data structures", RTR0MEMOBJ hMemObj. Its structure comment now warns that the members are ordered and aligned based on estimated frequency of usage and grouped to fit within a cache line in hot code paths, that even subtle changes have a noticeable effect in the bootsector benchmarks, and that it should be modified with care. The alignment assertions are extended accordingly:

    AssertCompileSizeAlignment(VMXVMCSINFO, 8);
    AssertCompileMemberAlignment(VMXVMCSINFO, pfnStartVM,       8);
    AssertCompileMemberAlignment(VMXVMCSINFO, u32PinCtls,       4);
    AssertCompileMemberAlignment(VMXVMCSINFO, u64VmcsLinkPtr,   8);
    AssertCompileMemberAlignment(VMXVMCSINFO, pvVmcs,           8);
    AssertCompileMemberAlignment(VMXVMCSINFO, pvShadowVmcs,     8);
    AssertCompileMemberAlignment(VMXVMCSINFO, pbVirtApic,       8);
    AssertCompileMemberAlignment(VMXVMCSINFO, pvMsrBitmap,      8);
    AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrLoad,   8);
    AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrStore,  8);
    AssertCompileMemberAlignment(VMXVMCSINFO, pvHostMsrLoad,    8);
    AssertCompileMemberAlignment(VMXVMCSINFO, HCPhysVmcs,       8);
    AssertCompileMemberAlignment(VMXVMCSINFO, hMemObj,          8);

HMCPU gains the same note about ordering by estimated usage frequency and cache-line grouping (and keeps its reminder that the assembly counterpart in HMInternal.mac must be updated when members change), along with a matching reshuffle of its booleans: fForceTLBFlush now sits with the debug-loop flags, fLoadSaveGuestXcr0 with the exception-intercept flags, fSingleInstruction and fClearTrapFlag close the group, and u8Alignment0[3] is renamed afAlignment0[3]. Further down, idEnteredCpu and enmShadowMode move ahead of the aPdpes array. The compile-time checks are expanded as well:

    AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush, 4);
    AssertCompileMemberAlignment(HMCPU, fForceTLBFlush, 4);
    AssertCompileMemberAlignment(HMCPU, cWorldSwitchExits, 4);
    AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);
    AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx, 8);
    AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfo, 8);
    AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfoNstGst, 8);
    AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.RestoreHost, 8);
    AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) svm, 8);
    AssertCompileMemberAlignment(HMCPU, Event, 8);
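Most of this reordering is about cache-line layout, and the diff leans on compile-time assertions to keep the layout from regressing silently. As a generic illustration only (not VirtualBox's actual macro definitions), the same effect can be had in plain C11 with _Static_assert and offsetof; the EXAMPLECPU type and its members below are invented for the example.

    #include <stddef.h>
    #include <stdint.h>

    /* A toy per-CPU state block: the hot 64-bit counter is deliberately placed on
       an 8-byte boundary so it shares a cache line with the flags that guard it. */
    typedef struct EXAMPLECPU
    {
        uint32_t fFlags;          /* frequently tested flags */
        uint32_t cTlbFlushes;     /* hot 32-bit counter in the same cache line */
        uint64_t cExits;          /* hot 64-bit counter, must stay 8-byte aligned */
        uint64_t u64Reserved;     /* keeps the structure size a multiple of 8 */
    } EXAMPLECPU;

    /* Equivalent in spirit to AssertCompileMemberAlignment()/AssertCompileSizeAlignment():
       if someone reorders the members and breaks the layout, the build fails here. */
    _Static_assert(offsetof(EXAMPLECPU, cExits) % 8 == 0, "cExits must be 8-byte aligned");
    _Static_assert(sizeof(EXAMPLECPU) % 8 == 0, "EXAMPLECPU size must be a multiple of 8");

    int main(void)
    {
        return 0;
    }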
trunk/src/VBox/VMM/include/HMInternal.mac
(changes from r80150 to r81637)

The assembly mirror of HMCPU is reordered to match the new C layout and picks up the rcLastExitToR3 field; the alignb directive that used to precede fCtxChanged is no longer needed:

    struc HMCPU
        .fCheckedTLBFlush               resb    1
        .fActive                        resb    1
        .fLeaveDone                     resb    1
        .fUsingHyperDR7                 resb    1
        .fForceTLBFlush                 resb    1
        .fUseDebugLoop                  resb    1
        .fUsingDebugLoop                resb    1
        .fDebugWantRdTscExit            resb    1

        .fLoadSaveGuestXcr0             resb    1
        .fGIMTrapXcptUD                 resb    1
        .fTrapXcptGpForLovelyMesaDrv    resb    1
        .fSingleInstruction             resb    1
        .fClearTrapFlag                 resb    1
        alignb 8

        .cWorldSwitchExits              resd    1
        .idLastCpu                      resd    1
        .cTlbFlushes                    resd    1
        .uCurrentAsid                   resd    1
        .u32HMError                     resd    1
        .rcLastExitToR3                 resd    1
        .fCtxChanged                    resq    1

        ; incomplete to save unnecessary pain...
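Because this file duplicates the C structure's layout by hand, the offsets only stay correct if every reorder like the one above is mirrored exactly. A common safeguard against drift (purely illustrative here; the names are made up and this is not necessarily how the VirtualBox build does it) is a small C program that derives the offsets from the real structure, whose output can be compared against, or generated into, the assembly include:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the C HMCPU; only the fields the assembly cares about. */
    typedef struct MINICPU
    {
        uint8_t  fCheckedTLBFlush;
        uint8_t  fActive;
        uint8_t  fLeaveDone;
        uint8_t  abFlags[10];       /* remaining flag bytes */
        uint8_t  abAlign[3];        /* matches 'alignb 8' in the .mac file */
        uint32_t cWorldSwitchExits;
        uint32_t idLastCpu;
        uint32_t cTlbFlushes;
        uint32_t uCurrentAsid;
        uint32_t u32HMError;
        int32_t  rcLastExitToR3;
        uint64_t fCtxChanged;
    } MINICPU;

    int main(void)
    {
        /* Emit NASM-style equates; a mismatch against the hand-written struc is
           then caught at build time rather than as silent state corruption. */
        printf("%%define MINICPU_off_fCheckedTLBFlush  %u\n", (unsigned)offsetof(MINICPU, fCheckedTLBFlush));
        printf("%%define MINICPU_off_cWorldSwitchExits %u\n", (unsigned)offsetof(MINICPU, cWorldSwitchExits));
        printf("%%define MINICPU_off_fCtxChanged       %u\n", (unsigned)offsetof(MINICPU, fCtxChanged));
        printf("%%define MINICPU_size                  %u\n", (unsigned)sizeof(MINICPU));
        return 0;
    }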