VirtualBox

Changeset 81637 in vbox


Timestamp:
Nov 4, 2019 4:22:10 AM
Author:
vboxsync
Message:

VMM/HMVMXR0: Revamped allocation of ring-0 VMCS and related data structures. We are no longer bound
to addresses below 4G (following the removal of 32-bit host support).
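
In outline, the revamp replaces one physically contiguous, below-4GB allocation per VT-x page (RTR0MemObjAllocCont) with a single ring-0 memory object covering all pages of a structure (RTR0MemObjAllocPage). A rough before/after sketch using the IPRT ring-0 memory API, error handling omitted:

    /* Before: one ring-0 memory object per page; RTR0MemObjAllocCont returns
       physically contiguous memory below 4GB (a 32-bit host leftover). */
    RTR0MEMOBJ hMemObjVmcs = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjAllocCont(&hMemObjVmcs, X86_PAGE_4K_SIZE, false /* fExecutable */);

    /* After: one ring-0 memory object backing all pages; individual pages are
       located by index and may live anywhere in physical memory. */
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    rc = RTR0MemObjAllocPage(&hMemObj, 5 << X86_PAGE_4K_SHIFT /* 5 pages */, false /* fExecutable */);
    RTHCPHYS HCPhysPage0 = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);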

Location:
trunk/src/VBox/VMM
Files:
3 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r81579 r81637  
    285285/** Pointer to a const VMX transient state. */
    286286typedef const VMXTRANSIENT *PCVMXTRANSIENT;
     287
     288/**
     289 * VMX page allocation information.
     290 */
     291typedef struct
     292{
     293    uint32_t    fValid;       /**< Whether to allocate this page (e.g., based on a CPU feature). */
     294    uint32_t    uPadding0;    /**< Padding to ensure arrays of this struct are aligned to a multiple of 8. */
     295    PRTHCPHYS   pHCPhys;      /**< Where to store the host-physical address of the allocation. */
     296    PRTR0PTR    ppVirt;       /**< Where to store the host-virtual address of the allocation. */
     297} VMXPAGEALLOCINFO;
     298/** Pointer to VMX page-allocation info. */
     299typedef VMXPAGEALLOCINFO *PVMXPAGEALLOCINFO;
     300/** Pointer to a const VMX page-allocation info. */
     301typedef const VMXPAGEALLOCINFO *PCVMXPAGEALLOCINFO;
     302AssertCompileSizeAlignment(VMXPAGEALLOCINFO, 8);
    287303
    288304/**
     
    16841700
    16851701/**
    1686  * Allocates and maps a physically contiguous page. The allocated page is
    1687  * zero'd out (used by various VT-x structures).
    1688  *
    1689  * @returns IPRT status code.
    1690  * @param   pMemObj     Pointer to the ring-0 memory object.
    1691  * @param   ppVirt      Where to store the virtual address of the allocation.
    1692  * @param   pHCPhys     Where to store the physical address of the allocation.
    1693  */
    1694 static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
    1695 {
    1696     AssertPtr(pMemObj);
    1697     AssertPtr(ppVirt);
    1698     AssertPtr(pHCPhys);
    1699     int rc = RTR0MemObjAllocCont(pMemObj, X86_PAGE_4K_SIZE, false /* fExecutable */);
    1700     if (RT_FAILURE(rc))
    1701         return rc;
    1702     *ppVirt  = RTR0MemObjAddress(*pMemObj);
    1703     *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
    1704     ASMMemZero32(*ppVirt, X86_PAGE_4K_SIZE);
      1702 * Allocates pages as specified by an array of VMX page-allocation info
      1703 * objects.
      1704 *
      1705 * The page contents are zero'd after allocation.
     1706 *
     1707 * @returns VBox status code.
      1708 * @param   phMemObj        Where to store the handle of the ring-0 memory object for the allocation.
     1709 * @param   paAllocInfo     The pointer to the first element of the VMX
     1710 *                          page-allocation info object array.
     1711 * @param   cEntries        The number of elements in the @a paAllocInfo array.
     1712 */
      1713static int hmR0VmxPagesAllocZ(PRTR0MEMOBJ phMemObj, PVMXPAGEALLOCINFO paAllocInfo, uint32_t cEntries)
     1714{
     1715    /* Figure out how many pages to allocate. */
     1716    uint32_t cPages = 0;
     1717    for (uint32_t iPage = 0; iPage < cEntries; iPage++)
     1718        cPages += !!paAllocInfo[iPage].fValid;
     1719
     1720    /* Allocate the pages. */
     1721    if (cPages)
     1722    {
     1723        size_t const cbPages = cPages << X86_PAGE_4K_SHIFT;
      1724        int rc = RTR0MemObjAllocPage(phMemObj, cbPages, false /* fExecutable */);
     1725        if (RT_FAILURE(rc))
     1726            return rc;
     1727
     1728        /* Zero the contents and assign each page to the corresponding VMX page-allocation entry. */
      1729        void *pvFirstPage = RTR0MemObjAddress(*phMemObj);
     1730        ASMMemZero32(pvFirstPage, cbPages);
     1731
     1732        uint32_t iPage = 0;
     1733        for (uint32_t i = 0; i < cEntries; i++)
     1734            if (paAllocInfo[i].fValid)
     1735            {
      1736                RTHCPHYS const HCPhysPage = RTR0MemObjGetPagePhysAddr(*phMemObj, iPage);
     1737                void          *pvPage     = (void *)((uintptr_t)pvFirstPage + (iPage << X86_PAGE_4K_SHIFT));
     1738                Assert(HCPhysPage && HCPhysPage != NIL_RTHCPHYS);
     1739                AssertPtr(pvPage);
     1740
      1741                Assert(paAllocInfo[i].pHCPhys);
      1742                Assert(paAllocInfo[i].ppVirt);
      1743                *paAllocInfo[i].pHCPhys = HCPhysPage;
      1744                *paAllocInfo[i].ppVirt  = pvPage;
     1745
     1746                /* Move to next page. */
     1747                ++iPage;
     1748            }
     1749
     1750        /* Make sure all valid (requested) pages have been assigned. */
     1751        Assert(iPage == cPages);
     1752    }
    17051753    return VINF_SUCCESS;
    17061754}
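
For illustration, a hypothetical caller fills a VMXPAGEALLOCINFO array, gating optional pages with fValid, and later frees everything with one call. This sketch assumes the corrected PRTR0MEMOBJ signature above; hMemObj, fFeatureX and the HCPhys/pv variables are illustrative:

    RTR0MEMOBJ hMemObj   = NIL_RTR0MEMOBJ;
    RTHCPHYS   HCPhysA   = NIL_RTHCPHYS, HCPhysB = NIL_RTHCPHYS;
    RTR0PTR    pvA       = NIL_RTR0PTR,  pvB     = NIL_RTR0PTR;
    bool const fFeatureX = false;   /* e.g. a CPU-feature check */
    VMXPAGEALLOCINFO aAllocInfo[] =
    {
        { true,      0 /* Unused */, &HCPhysA, &pvA },
        { fFeatureX, 0 /* Unused */, &HCPhysB, &pvB },
    };
    int rc = hmR0VmxPagesAllocZ(&hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    /* On success pvA/HCPhysA are set; pvB/HCPhysB stay untouched because
       fFeatureX is false. Free wholesale with hmR0VmxPagesFree(hMemObj). */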
     
    17081756
    17091757/**
    1710  * Frees and unmaps an allocated, physical page.
    1711  *
    1712  * @param   pMemObj     Pointer to the ring-0 memory object.
    1713  * @param   ppVirt      Where to re-initialize the virtual address of allocation as
    1714  *                      0.
    1715  * @param   pHCPhys     Where to re-initialize the physical address of the
    1716  *                      allocation as 0.
    1717  */
    1718 static void hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
    1719 {
    1720     AssertPtr(pMemObj);
    1721     AssertPtr(ppVirt);
    1722     AssertPtr(pHCPhys);
    1723     /* NULL is valid, accepted and ignored by the free function below. */
    1724     RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
    1725     *pMemObj = NIL_RTR0MEMOBJ;
    1726     *ppVirt  = NULL;
    1727     *pHCPhys = NIL_RTHCPHYS;
     1758 * Frees pages allocated using hmR0VmxPagesAllocZ.
     1759 *
     1760 * @param   hMemObj     The ring-0 memory object associated with the allocation.
     1761 */
     1762DECL_FORCE_INLINE(void) hmR0VmxPagesFree(RTR0MEMOBJ hMemObj)
     1763{
      1764    /* We can clean up wholesale since it's all one allocation. */
     1765    RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    17281766}
    17291767
     
    17341772 * @param   pVmcsInfo   The VMCS info. object.
    17351773 */
    1736 static void hmR0VmxInitVmcsInfo(PVMXVMCSINFO pVmcsInfo)
     1774static void hmR0VmxVmcsInfoInit(PVMXVMCSINFO pVmcsInfo)
    17371775{
    17381776    memset(pVmcsInfo, 0, sizeof(*pVmcsInfo));
    17391777
    1740     Assert(pVmcsInfo->hMemObjVmcs          == NIL_RTR0MEMOBJ);
    1741     Assert(pVmcsInfo->hMemObjShadowVmcs    == NIL_RTR0MEMOBJ);
    1742     Assert(pVmcsInfo->hMemObjMsrBitmap     == NIL_RTR0MEMOBJ);
    1743     Assert(pVmcsInfo->hMemObjGuestMsrLoad  == NIL_RTR0MEMOBJ);
    1744     Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ);
    1745     Assert(pVmcsInfo->hMemObjHostMsrLoad   == NIL_RTR0MEMOBJ);
     1778    Assert(pVmcsInfo->hMemObj == NIL_RTR0MEMOBJ);
    17461779    pVmcsInfo->HCPhysVmcs          = NIL_RTHCPHYS;
    17471780    pVmcsInfo->HCPhysShadowVmcs    = NIL_RTHCPHYS;
     
    17611794 * Frees the VT-x structures for a VMCS info. object.
    17621795 *
    1763  * @param   pVM         The cross context VM structure.
    17641796 * @param   pVmcsInfo   The VMCS info. object.
    17651797 */
    1766 static void hmR0VmxFreeVmcsInfo(PVMCC pVM, PVMXVMCSINFO pVmcsInfo)
    1767 {
    1768     hmR0VmxPageFree(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs);
    1769 
    1770 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1771     if (pVM->hm.s.vmx.fUseVmcsShadowing)
    1772         hmR0VmxPageFree(&pVmcsInfo->hMemObjShadowVmcs, &pVmcsInfo->pvShadowVmcs, &pVmcsInfo->HCPhysShadowVmcs);
    1773 #endif
    1774 
    1775     if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    1776         hmR0VmxPageFree(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap);
    1777 
    1778     hmR0VmxPageFree(&pVmcsInfo->hMemObjHostMsrLoad,   &pVmcsInfo->pvHostMsrLoad,   &pVmcsInfo->HCPhysHostMsrLoad);
    1779     hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrLoad,  &pVmcsInfo->pvGuestMsrLoad,  &pVmcsInfo->HCPhysGuestMsrLoad);
    1780     hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrStore, &pVmcsInfo->pvGuestMsrStore, &pVmcsInfo->HCPhysGuestMsrStore);
    1781 
    1782     hmR0VmxInitVmcsInfo(pVmcsInfo);
     1798static void hmR0VmxVmcsInfoFree(PVMXVMCSINFO pVmcsInfo)
     1799{
     1800    if (pVmcsInfo->hMemObj != NIL_RTR0MEMOBJ)
     1801    {
     1802        hmR0VmxPagesFree(pVmcsInfo->hMemObj);
     1803        hmR0VmxVmcsInfoInit(pVmcsInfo);
     1804    }
    17831805}
    17841806
     
    17911813 * @param   pVmcsInfo       The VMCS info. object.
    17921814 * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
     1815 *
     1816 * @remarks The caller is expected to take care of any and all allocation failures.
     1817 *          This function will not perform any cleanup for failures half-way
     1818 *          through.
    17931819 */
    17941820static int hmR0VmxAllocVmcsInfo(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
     
    17961822    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    17971823
    1798     /* Allocate the guest VM control structure (VMCS). */
    1799     int rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs);
    1800     if (RT_SUCCESS(rc))
     1824    bool const fMsrBitmaps = RT_BOOL(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS);
     1825    bool const fShadowVmcs = !fIsNstGstVmcs ? pVM->hm.s.vmx.fUseVmcsShadowing : pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing;
     1826    Assert(!pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing);  /* VMCS shadowing is not yet exposed to the guest. */
     1827    VMXPAGEALLOCINFO aAllocInfo[] = {
     1828        { true,        0 /* Unused */, &pVmcsInfo->HCPhysVmcs,         &pVmcsInfo->pvVmcs         },
     1829        { true,        0 /* Unused */, &pVmcsInfo->HCPhysGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad },
     1830        { true,        0 /* Unused */, &pVmcsInfo->HCPhysHostMsrLoad,  &pVmcsInfo->pvHostMsrLoad  },
     1831        { fMsrBitmaps, 0 /* Unused */, &pVmcsInfo->HCPhysMsrBitmap,    &pVmcsInfo->pvMsrBitmap    },
     1832        { fShadowVmcs, 0 /* Unused */, &pVmcsInfo->HCPhysShadowVmcs,   &pVmcsInfo->pvShadowVmcs   },
     1833    };
     1834
      1835    int rc = hmR0VmxPagesAllocZ(&pVmcsInfo->hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
     1836    if (RT_FAILURE(rc))
     1837        return rc;
     1838
     1839    /*
      1840     * We use the same page for the VM-entry MSR-load and VM-exit MSR-store areas,
      1841     * because they contain a symmetric list of guest MSRs to load on VM-entry and store on VM-exit.
     1842     */
     1843    AssertCompile(RT_ELEMENTS(aAllocInfo) > 0);
     1844    Assert(pVmcsInfo->HCPhysGuestMsrLoad != NIL_RTHCPHYS);
     1845    pVmcsInfo->pvGuestMsrStore     = pVmcsInfo->pvGuestMsrLoad;
     1846    pVmcsInfo->HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrLoad;
     1847
     1848    /*
      1849     * Obtain the virtual-APIC page rather than allocating a new one.
     1850     */
     1851    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
    18011852    {
    18021853        if (!fIsNstGstVmcs)
    18031854        {
    1804 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1805             if (pVM->hm.s.vmx.fUseVmcsShadowing)
    1806                 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjShadowVmcs, &pVmcsInfo->pvShadowVmcs, &pVmcsInfo->HCPhysShadowVmcs);
    1807 #endif
    1808             if (RT_SUCCESS(rc))
     1855            if (PDMHasApic(pVM))
    18091856            {
    1810                 /* Get the allocated virtual-APIC page from the virtual APIC device. */
    1811                 if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
    1812                     && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
    1813                     rc = APICGetApicPageForCpu(pVCpu, &pVmcsInfo->HCPhysVirtApic, (PRTR0PTR)&pVmcsInfo->pbVirtApic, NULL /*pR3Ptr*/);
     1857                rc = APICGetApicPageForCpu(pVCpu, &pVmcsInfo->HCPhysVirtApic, (PRTR0PTR)&pVmcsInfo->pbVirtApic, NULL /*pR3Ptr*/);
     1858                if (RT_FAILURE(rc))
     1859                    return rc;
    18141860            }
    18151861        }
    18161862        else
    1817         {
    1818             /* We don't yet support exposing VMCS shadowing to the guest. */
    1819             Assert(pVmcsInfo->HCPhysShadowVmcs == NIL_RTHCPHYS);
    1820             Assert(!pVmcsInfo->pvShadowVmcs);
    1821 
    1822             /* Get the allocated virtual-APIC page from CPUM. */
    1823             if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
    1824             {
    1825                 /** @todo NSTVMX: Get rid of this. There is no need to allocate a separate HC
    1826                  *        page for this. Use the one provided by the nested-guest directly. */
    1827                 pVmcsInfo->pbVirtApic = (uint8_t *)CPUMGetGuestVmxVirtApicPage(pVCpu, &pVCpu->cpum.GstCtx,
    1828                                                                                &pVmcsInfo->HCPhysVirtApic);
    1829                 Assert(pVmcsInfo->pbVirtApic);
    1830                 Assert(pVmcsInfo->HCPhysVirtApic && pVmcsInfo->HCPhysVirtApic != NIL_RTHCPHYS);
    1831             }
    1832         }
    1833 
    1834         if (RT_SUCCESS(rc))
    1835         {
    1836             /*
    1837              * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
    1838              * transparent accesses of specific MSRs.
    1839              *
    1840              * If the condition for enabling MSR bitmaps changes here, don't forget to
    1841              * update HMIsMsrBitmapActive().
    1842              *
    1843              * We don't share MSR bitmaps between the guest and nested-guest as we then
    1844              * don't need to care about carefully restoring the guest MSR bitmap.
    1845              * The guest visible nested-guest MSR bitmap needs to remain unchanged.
    1846              * Hence, allocate a separate MSR bitmap for the guest and nested-guest.
    1847              * We also don't need to re-initialize the nested-guest MSR bitmap here as
    1848              * we do that later while merging VMCS.
    1849              */
    1850             if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    1851             {
    1852                 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap);
    1853                 if (   RT_SUCCESS(rc)
    1854                     && !fIsNstGstVmcs)
    1855                     ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff));
    1856             }
    1857 
    1858             if (RT_SUCCESS(rc))
    1859             {
    1860                 /*
    1861                  * Allocate the VM-entry MSR-load area for the guest MSRs.
    1862                  *
    1863                  * Similar to MSR-bitmaps, we do not share the auto MSR-load/store are between
    1864                  * the guest and nested-guest.
    1865                  */
    1866                 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad,
    1867                                        &pVmcsInfo->HCPhysGuestMsrLoad);
    1868                 if (RT_SUCCESS(rc))
    1869                 {
    1870                     /*
    1871                      * We use the same page for VM-entry MSR-load and VM-exit MSR store areas.
    1872                      * These contain the guest MSRs to load on VM-entry and store on VM-exit.
    1873                      */
    1874                     Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ);
    1875                     pVmcsInfo->pvGuestMsrStore     = pVmcsInfo->pvGuestMsrLoad;
    1876                     pVmcsInfo->HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrLoad;
    1877 
    1878                     /* Allocate the VM-exit MSR-load page for the host MSRs. */
    1879                     rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjHostMsrLoad, &pVmcsInfo->pvHostMsrLoad,
    1880                                            &pVmcsInfo->HCPhysHostMsrLoad);
    1881                 }
    1882             }
    1883         }
    1884     }
    1885 
    1886     return rc;
     1863            pVmcsInfo->pbVirtApic = (uint8_t *)CPUMGetGuestVmxVirtApicPage(pVCpu, &pVCpu->cpum.GstCtx,
     1864                                                                           &pVmcsInfo->HCPhysVirtApic);
     1865        Assert(pVmcsInfo->pbVirtApic);
     1866        Assert(pVmcsInfo->HCPhysVirtApic && pVmcsInfo->HCPhysVirtApic != NIL_RTHCPHYS);
     1867    }
     1868
     1869    return VINF_SUCCESS;
    18871870}
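
The shared guest MSR-load/store page works because both areas use the same 16-byte entry format defined by VMX; only the direction differs (loaded on VM-entry, stored on VM-exit). A sketch of that entry layout (VirtualBox's own type for this is VMXAUTOMSR; the struct below is illustrative):

    typedef struct
    {
        uint32_t u32Msr;        /* MSR index. */
        uint32_t u32Reserved;   /* Reserved, must be zero. */
        uint64_t u64Value;      /* Value to load on VM-entry / slot the CPU stores into on VM-exit. */
    } VMXMSRENTRY;

With a symmetric MSR list, the entries written for VM-entry loading are exactly the slots the CPU overwrites on VM-exit, so one page serves both roles.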
    18881871
     
    18961879static void hmR0VmxStructsFree(PVMCC pVM)
    18971880{
    1898 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
    1899     hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
    1900 #endif
    1901     hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
    1902 
     1881    hmR0VmxPagesFree(pVM->hm.s.vmx.hMemObj);
    19031882#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    19041883    if (pVM->hm.s.vmx.fUseVmcsShadowing)
     
    19061885        RTMemFree(pVM->hm.s.vmx.paShadowVmcsFields);
    19071886        RTMemFree(pVM->hm.s.vmx.paShadowVmcsRoFields);
    1908         hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjVmreadBitmap,  &pVM->hm.s.vmx.pvVmreadBitmap,  &pVM->hm.s.vmx.HCPhysVmreadBitmap);
    1909         hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjVmwriteBitmap, &pVM->hm.s.vmx.pvVmwriteBitmap, &pVM->hm.s.vmx.HCPhysVmwriteBitmap);
    19101887    }
    19111888#endif
     
    19141891    {
    19151892        PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
    1916         PVMXVMCSINFO pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfo;
    1917         hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo);
     1893        hmR0VmxVmcsInfoFree(&pVCpu->hm.s.vmx.VmcsInfo);
    19181894#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    19191895        if (pVM->cpum.ro.GuestFeatures.fVmx)
    1920         {
    1921             pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
    1922             hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo);
    1923         }
     1896            hmR0VmxVmcsInfoFree(&pVCpu->hm.s.vmx.VmcsInfoNstGst);
    19241897#endif
    19251898    }
     
    19321905 * @returns IPRT status code.
    19331906 * @param   pVM     The cross context VM structure.
     1907 *
      1908 * @remarks This function cleans up on memory allocation failures.
    19341909 */
    19351910static int hmR0VmxStructsAlloc(PVMCC pVM)
     
    19511926
    19521927    /*
    1953      * Initialize/check members up-front so we can cleanup en masse on allocation failures.
    1954      */
     1928     * Allocate per-VM VT-x structures.
     1929     */
     1930    bool const fVirtApicAccess   = RT_BOOL(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
     1931    bool const fUseVmcsShadowing = pVM->hm.s.vmx.fUseVmcsShadowing;
     1932    VMXPAGEALLOCINFO aAllocInfo[] = {
     1933        { fVirtApicAccess,   0 /* Unused */, &pVM->hm.s.vmx.HCPhysApicAccess,    (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess },
     1934        { fUseVmcsShadowing, 0 /* Unused */, &pVM->hm.s.vmx.HCPhysVmreadBitmap,  &pVM->hm.s.vmx.pvVmreadBitmap         },
     1935        { fUseVmcsShadowing, 0 /* Unused */, &pVM->hm.s.vmx.HCPhysVmwriteBitmap, &pVM->hm.s.vmx.pvVmwriteBitmap        },
    19551936#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    1956     Assert(pVM->hm.s.vmx.hMemObjScratch == NIL_RTR0MEMOBJ);
    1957     Assert(pVM->hm.s.vmx.pbScratch == NULL);
    1958     pVM->hm.s.vmx.HCPhysScratch = NIL_RTHCPHYS;
      1937        { true,              0 /* Unused */, &pVM->hm.s.vmx.HCPhysScratch,       (PRTR0PTR)&pVM->hm.s.vmx.pbScratch    },
    19591938#endif
    1960 
    1961     Assert(pVM->hm.s.vmx.hMemObjApicAccess == NIL_RTR0MEMOBJ);
    1962     Assert(pVM->hm.s.vmx.pbApicAccess == NULL);
    1963     pVM->hm.s.vmx.HCPhysApicAccess = NIL_RTHCPHYS;
    1964 
    1965     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    1966     {
    1967         PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
    1968         hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfo);
    1969         hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfoNstGst);
    1970     }
    1971 
    1972     /*
    1973      * Allocate per-VM VT-x structures.
    1974      */
    1975     int rc = VINF_SUCCESS;
    1976 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
    1977     /* Allocate crash-dump magic scratch page. */
    1978     rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
     1939    };
     1940
      1941    int rc = hmR0VmxPagesAllocZ(&pVM->hm.s.vmx.hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    19791942    if (RT_FAILURE(rc))
    1980     {
    1981         hmR0VmxStructsFree(pVM);
    1982         return rc;
    1983     }
    1984     strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
    1985     *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
    1986 #endif
    1987 
    1988     /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
    1989     if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    1990     {
    1991         rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
    1992                                &pVM->hm.s.vmx.HCPhysApicAccess);
    1993         if (RT_FAILURE(rc))
    1994         {
    1995             hmR0VmxStructsFree(pVM);
    1996             return rc;
    1997         }
    1998     }
     1943        goto cleanup;
    19991944
    20001945#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2001     /* Allocate the shadow VMCS fields array, VMREAD, VMWRITE bitmaps.. */
    2002     if (pVM->hm.s.vmx.fUseVmcsShadowing)
     1946    /* Allocate the shadow VMCS-fields array. */
     1947    if (fUseVmcsShadowing)
    20031948    {
    20041949        Assert(!pVM->hm.s.vmx.cShadowVmcsFields);
     
    20081953        if (RT_LIKELY(   pVM->hm.s.vmx.paShadowVmcsFields
    20091954                      && pVM->hm.s.vmx.paShadowVmcsRoFields))
    2010         {
    2011             rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjVmreadBitmap, &pVM->hm.s.vmx.pvVmreadBitmap,
    2012                                    &pVM->hm.s.vmx.HCPhysVmreadBitmap);
    2013             if (RT_SUCCESS(rc))
    2014             {
    2015                 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjVmwriteBitmap, &pVM->hm.s.vmx.pvVmwriteBitmap,
    2016                                        &pVM->hm.s.vmx.HCPhysVmwriteBitmap);
    2017             }
    2018         }
     1955        { /* likely */ }
    20191956        else
     1957        {
    20201958            rc = VERR_NO_MEMORY;
    2021 
    2022         if (RT_FAILURE(rc))
    2023         {
    2024             hmR0VmxStructsFree(pVM);
    2025             return rc;
     1959            goto cleanup;
    20261960        }
    20271961    }
     
    20291963
    20301964    /*
    2031      * Initialize per-VCPU VT-x structures.
     1965     * Allocate per-VCPU VT-x structures.
    20321966     */
    20331967    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     
    20361970        PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
    20371971        rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
    2038         if (RT_SUCCESS(rc))
    2039         {
     1972        if (RT_FAILURE(rc))
     1973            goto cleanup;
     1974
    20401975#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2041             /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */
    2042             if (pVM->cpum.ro.GuestFeatures.fVmx)
    2043             {
    2044                 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
    2045                 if (RT_SUCCESS(rc))
    2046                 { /* likely */ }
    2047                 else
    2048                     break;
    2049             }
      1976        /* Allocate the nested-guest VMCS structures when the VMX feature is exposed to the guest. */
     1977        if (pVM->cpum.ro.GuestFeatures.fVmx)
     1978        {
     1979            rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
     1980            if (RT_FAILURE(rc))
     1981                goto cleanup;
     1982        }
    20501983#endif
    2051         }
    2052         else
    2053             break;
    2054     }
    2055 
    2056     if (RT_FAILURE(rc))
    2057     {
    2058         hmR0VmxStructsFree(pVM);
    2059         return rc;
    20601984    }
    20611985
    20621986    return VINF_SUCCESS;
     1987
     1988cleanup:
     1989    hmR0VmxStructsFree(pVM);
     1990    Assert(rc != VINF_SUCCESS);
     1991    return rc;
     1992}
     1993
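
hmR0VmxStructsAlloc above uses the single-exit cleanup idiom: every failure path jumps to one label that frees everything allocated so far, which is safe because hmR0VmxStructsInit pre-sets all handles to NIL. A generic sketch of the idiom (allocA, allocB and freeAll are hypothetical helpers, not VBox APIs):

    int doInit(void)
    {
        int rc = allocA();
        if (RT_FAILURE(rc))
            goto cleanup;
        rc = allocB();
        if (RT_FAILURE(rc))
            goto cleanup;
        return VINF_SUCCESS;

    cleanup:
        freeAll();   /* Must tolerate a partially initialized state. */
        return rc;
    }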
     1994
     1995/**
     1996 * Pre-initializes non-zero fields in VMX structures that will be allocated.
     1997 *
     1998 * @param   pVM     The cross context VM structure.
     1999 */
     2000static void hmR0VmxStructsInit(PVMCC pVM)
     2001{
     2002    /* Paranoia. */
     2003    Assert(pVM->hm.s.vmx.pbApicAccess == NULL);
     2004#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     2005    Assert(pVM->hm.s.vmx.pbScratch == NULL);
     2006#endif
     2007
     2008    /*
      2009     * Initialize members up-front so we can clean up en masse on allocation failures.
     2010     */
     2011#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     2012    pVM->hm.s.vmx.HCPhysScratch = NIL_RTHCPHYS;
     2013#endif
     2014    pVM->hm.s.vmx.HCPhysApicAccess    = NIL_RTHCPHYS;
     2015    pVM->hm.s.vmx.HCPhysVmreadBitmap  = NIL_RTHCPHYS;
     2016    pVM->hm.s.vmx.HCPhysVmwriteBitmap = NIL_RTHCPHYS;
     2017    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     2018    {
     2019        PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
     2020        hmR0VmxVmcsInfoInit(&pVCpu->hm.s.vmx.VmcsInfo);
     2021        hmR0VmxVmcsInfoInit(&pVCpu->hm.s.vmx.VmcsInfoNstGst);
     2022    }
    20632023}
    20642024
     
    33813341             * However, if the guest can write to all fields (including read-only fields),
    33823342             * we treat it a as read/write field. Otherwise, writing to these fields would
    3383              * cause a VMWRITE instruction error while syncing the shadow VMCS .
     3343             * cause a VMWRITE instruction error while syncing the shadow VMCS.
    33843344             */
    33853345            if (   fGstVmwriteAll
     
    34063366{
    34073367    /*
    3408      * By default, ensure guest attempts to acceses to any VMCS fields cause VM-exits.
     3368     * By default, ensure guest attempts to access any VMCS fields cause VM-exits.
    34093369     */
    34103370    uint32_t const cbBitmap        = X86_PAGE_4K_SIZE;
     
    35653525{
    35663526    Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
     3527
     3528    /*
     3529     * By default, ensure guest attempts to access any MSR cause VM-exits.
     3530     * This shall later be relaxed for specific MSRs as necessary.
     3531     *
     3532     * Note: For nested-guests, the entire bitmap will be merged prior to
     3533     * executing the nested-guest using hardware-assisted VMX and hence there
     3534     * is no need to perform this operation. See hmR0VmxMergeMsrBitmapNested.
     3535     */
     3536    Assert(pVmcsInfo->pvMsrBitmap);
     3537    ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff));
    35673538
    35683539    /*
     
    41934164    LogFlowFunc(("pVM=%p\n", pVM));
    41944165
     4166    hmR0VmxStructsInit(pVM);
    41954167    int rc = hmR0VmxStructsAlloc(pVM);
    41964168    if (RT_FAILURE(rc))
     
    42004172    }
    42014173
      4174    /* Set up the crash-dump scratch page. */
     4175#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     4176    strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
     4177    *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
     4178#endif
    42024179    return VINF_SUCCESS;
    42034180}
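
For context on the hmR0VmxSetupVmcsMsrBitmap hunk above: filling the 4K bitmap with 0xffffffff makes every MSR access cause a VM-exit, and individual MSRs are then allowed by clearing bits. Per the VMX layout the page splits into four 1KB regions: read-low (MSRs 0x0-0x1FFF), read-high (0xC0000000-0xC0001FFF), write-low and write-high. A sketch of clearing one read-low bit (the helper is illustrative, not a VBox API):

    static void vmxAllowMsrReadLow(uint8_t *pbMsrBitmap, uint32_t idMsr)
    {
        Assert(idMsr <= 0x1fff);                      /* low MSR range only */
        /* Bytes 0-1023 of the bitmap hold the read-low bits, one per MSR;
           clearing the bit stops the VM-exit for reads of this MSR. */
        pbMsrBitmap[idMsr / 8] &= ~(uint8_t)(1 << (idMsr % 8));
    }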
  • trunk/src/VBox/VMM/include/HMInternal.h

    r81578 r81637  
    448448typedef struct HM
    449449{
     450    /** Set if nested paging is enabled. */
     451    bool                        fNestedPaging;
    450452    /** Set when we've initialized VMX or SVM. */
    451453    bool                        fInitialized;
    452     /** Set if nested paging is enabled. */
    453     bool                        fNestedPaging;
    454454    /** Set if nested paging is allowed. */
    455455    bool                        fAllowNestedPaging;
     
    525525        uint8_t                     cPreemptTimerShift;
    526526
    527         /** Virtual address of the TSS page used for real mode emulation. */
    528         R3PTRTYPE(PVBOXTSS)         pRealModeTSS;
    529         /** Virtual address of the identity page table used for real mode and protected
    530          *  mode without paging emulation in EPT mode. */
    531         R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
    532 
    533         /** Physical address of the APIC-access page. */
    534         RTHCPHYS                    HCPhysApicAccess;
    535         /** R0 memory object for the APIC-access page. */
    536         RTR0MEMOBJ                  hMemObjApicAccess;
    537527        /** Virtual address of the APIC-access page. */
    538528        R0PTRTYPE(uint8_t *)        pbApicAccess;
    539 
    540         /** Physical address of the VMREAD bitmap. */
    541         RTHCPHYS                    HCPhysVmreadBitmap;
    542         /** Ring-0 memory object for the VMREAD bitmap. */
    543         RTR0MEMOBJ                  hMemObjVmreadBitmap;
    544529        /** Pointer to the VMREAD bitmap. */
    545530        R0PTRTYPE(void *)           pvVmreadBitmap;
    546 
    547         /** Physical address of the VMWRITE bitmap. */
    548         RTHCPHYS                    HCPhysVmwriteBitmap;
    549         /** Ring-0 memory object for the VMWRITE bitmap. */
    550         RTR0MEMOBJ                  hMemObjVmwriteBitmap;
    551531        /** Pointer to the VMWRITE bitmap. */
    552532        R0PTRTYPE(void *)           pvVmwriteBitmap;
    553533
    554 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
    555         /** Physical address of the crash-dump scratch area. */
    556         RTHCPHYS                    HCPhysScratch;
    557         /** Ring-0 memory object for the crash-dump scratch area. */
    558         RTR0MEMOBJ                  hMemObjScratch;
    559         /** Pointer to the crash-dump scratch bitmap. */
    560         R0PTRTYPE(uint8_t *)        pbScratch;
    561 #endif
     534        /** Pointer to the shadow VMCS read-only fields array. */
     535        R0PTRTYPE(uint32_t *)       paShadowVmcsRoFields;
     536        /** Pointer to the shadow VMCS read/write fields array. */
     537        R0PTRTYPE(uint32_t *)       paShadowVmcsFields;
     538        /** Number of elements in the shadow VMCS read-only fields array. */
     539        uint32_t                    cShadowVmcsRoFields;
     540        /** Number of elements in the shadow VMCS read-write fields array. */
     541        uint32_t                    cShadowVmcsFields;
    562542
    563543        /** Tagged-TLB flush type. */
     
    591571        /** Host-physical address for a failing VMXON instruction. */
    592572        RTHCPHYS                    HCPhysVmxEnableError;
    593 
    594         /** Pointer to the shadow VMCS read-only fields array. */
    595         R0PTRTYPE(uint32_t *)       paShadowVmcsRoFields;
    596         /** Pointer to the shadow VMCS read/write fields array. */
    597         R0PTRTYPE(uint32_t *)       paShadowVmcsFields;
    598         /** Number of elements in the shadow VMCS read-only fields array. */
    599         uint32_t                    cShadowVmcsRoFields;
    600         /** Number of elements in the shadow VMCS read-write fields array. */
    601         uint32_t                    cShadowVmcsFields;
     573        /** Host-physical address of the APIC-access page. */
     574        RTHCPHYS                    HCPhysApicAccess;
     575        /** Host-physical address of the VMREAD bitmap. */
     576        RTHCPHYS                    HCPhysVmreadBitmap;
     577        /** Host-physical address of the VMWRITE bitmap. */
     578        RTHCPHYS                    HCPhysVmwriteBitmap;
     579#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     580        /** Host-physical address of the crash-dump scratch area. */
     581        RTHCPHYS                    HCPhysScratch;
     582#endif
     583
     584#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     585        /** Pointer to the crash-dump scratch bitmap. */
     586        R0PTRTYPE(uint8_t *)        pbScratch;
     587#endif
     588        /** Virtual address of the TSS page used for real mode emulation. */
     589        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;
     590        /** Virtual address of the identity page table used for real mode and protected
     591         *  mode without paging emulation in EPT mode. */
     592        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
     593
     594        /** Ring-0 memory object for per-VM VMX structures. */
     595        RTR0MEMOBJ                  hMemObj;
    602596    } vmx;
    603597
     
    664658/** Pointer to HM VM instance data. */
    665659typedef HM *PHM;
    666 
    667660AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
     661AssertCompileMemberAlignment(HM, vmx,                 8);
     662AssertCompileMemberAlignment(HM, svm,                 8);
     663
    668664
    669665/**
     
    692688 * guest (or nested-guest) VMCS (VM control structure) using hardware-assisted VMX.
    693689 *
    694  * The members here are ordered and aligned based on estimated frequency of usage
    695  * and grouped to fit within a cache line in hot code paths.
     690 * Note! The members here are ordered and aligned based on estimated frequency of
     691 * usage and grouped to fit within a cache line in hot code paths. Even subtle
     692 * changes here have a noticeable effect in the bootsector benchmarks. Modify with
     693 * care.
    696694 */
    697695typedef struct VMXVMCSINFO
     
    807805    /** @} */
    808806
    809     /** @name R0-memory objects address of VMCS and related data structures.
      807    /** @name R0 memory object for the VMCS and related data structures.
    810808     *  @{ */
    811     /** The VMCS. */
    812     RTR0MEMOBJ                  hMemObjVmcs;
    813     /** R0 memory object for the shadow VMCS. */
    814     RTR0MEMOBJ                  hMemObjShadowVmcs;
    815     /** R0 memory object for the MSR bitmap. */
    816     RTR0MEMOBJ                  hMemObjMsrBitmap;
    817     /** R0 memory object of the VM-entry MSR-load area. */
    818     RTR0MEMOBJ                  hMemObjGuestMsrLoad;
    819     /** R0 memory object of the VM-exit MSR-store area. */
    820     RTR0MEMOBJ                  hMemObjGuestMsrStore;
    821     /** R0 memory object for the VM-exit MSR-load area. */
    822     RTR0MEMOBJ                  hMemObjHostMsrLoad;
     809    /** R0-memory object for VMCS and related data structures. */
     810    RTR0MEMOBJ                  hMemObj;
    823811    /** @} */
    824812
     
    831819typedef const VMXVMCSINFO *PCVMXVMCSINFO;
    832820AssertCompileSizeAlignment(VMXVMCSINFO, 8);
    833 AssertCompileMemberAlignment(VMXVMCSINFO, pfnStartVM, 8);
    834 AssertCompileMemberAlignment(VMXVMCSINFO, u32PinCtls, 4);
    835 AssertCompileMemberAlignment(VMXVMCSINFO, u64VmcsLinkPtr, 8);
    836 AssertCompileMemberAlignment(VMXVMCSINFO, pvVmcs, 8);
    837 AssertCompileMemberAlignment(VMXVMCSINFO, HCPhysVmcs, 8);
    838 AssertCompileMemberAlignment(VMXVMCSINFO, hMemObjVmcs, 8);
     821AssertCompileMemberAlignment(VMXVMCSINFO, pfnStartVM,      8);
     822AssertCompileMemberAlignment(VMXVMCSINFO, u32PinCtls,      4);
     823AssertCompileMemberAlignment(VMXVMCSINFO, u64VmcsLinkPtr,  8);
     824AssertCompileMemberAlignment(VMXVMCSINFO, pvVmcs,          8);
     825AssertCompileMemberAlignment(VMXVMCSINFO, pvShadowVmcs,    8);
     826AssertCompileMemberAlignment(VMXVMCSINFO, pbVirtApic,      8);
     827AssertCompileMemberAlignment(VMXVMCSINFO, pvMsrBitmap,     8);
     828AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrLoad,  8);
     829AssertCompileMemberAlignment(VMXVMCSINFO, pvGuestMsrStore, 8);
     830AssertCompileMemberAlignment(VMXVMCSINFO, pvHostMsrLoad,   8);
     831AssertCompileMemberAlignment(VMXVMCSINFO, HCPhysVmcs,      8);
     832AssertCompileMemberAlignment(VMXVMCSINFO, hMemObj,         8);
    839833
    840834/**
     
    843837 * Note! If you change members of this struct, make sure to check if the
    844838 * assembly counterpart in HMInternal.mac needs to be updated as well.
     839 *
     840 * Note! The members here are ordered and aligned based on estimated frequency of
     841 * usage and grouped to fit within a cache line in hot code paths. Even subtle
     842 * changes here have a noticeable effect in the bootsector benchmarks. Modify with
     843 * care.
    845844 */
    846845typedef struct HMCPU
     
    848847    /** Set when the TLB has been checked until we return from the world switch. */
    849848    bool volatile               fCheckedTLBFlush;
    850     /** Set if we need to flush the TLB during the world switch. */
    851     bool                        fForceTLBFlush;
    852849    /** Set when we're using VT-x or AMD-V at that moment. */
    853850    bool                        fActive;
     
    856853    /** Whether we're using the hyper DR7 or guest DR7. */
    857854    bool                        fUsingHyperDR7;
    858     /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
    859      *  execution. */
    860     bool                        fLoadSaveGuestXcr0;
    861 
     855
     856    /** Set if we need to flush the TLB during the world switch. */
     857    bool                        fForceTLBFlush;
    862858    /** Whether we should use the debug loop because of single stepping or special
    863859     *  debug breakpoints / events are armed. */
     
    868864    /** Set if we using the debug loop and wish to intercept RDTSC. */
    869865    bool                        fDebugWantRdTscExit;
    870     /** Whether we're executing a single instruction. */
    871     bool                        fSingleInstruction;
    872     /** Set if we need to clear the trap flag because of single stepping. */
    873     bool                        fClearTrapFlag;
    874 
     866
     867    /** Set if XCR0 needs to be saved/restored when entering/exiting guest code
     868     *  execution. */
     869    bool                        fLoadSaveGuestXcr0;
    875870    /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
    876871    bool                        fGIMTrapXcptUD;
    877872    /** Whether \#GP needs to be intercept for mesa driver workaround. */
    878873    bool                        fTrapXcptGpForLovelyMesaDrv;
    879     uint8_t                     u8Alignment0[3];
     874    /** Whether we're executing a single instruction. */
     875    bool                        fSingleInstruction;
     876
     877    /** Set if we need to clear the trap flag because of single stepping. */
     878    bool                        fClearTrapFlag;
     879    bool                        afAlignment0[3];
    880880
    881881    /** World switch exit counter. */
     
    10051005    HMEVENT                 Event;
    10061006
     1007    /** The CPU ID of the CPU currently owning the VMCS. Set in
     1008     * HMR0Enter and cleared in HMR0Leave. */
     1009    RTCPUID                 idEnteredCpu;
     1010
     1011    /** Current shadow paging mode for updating CR4. */
     1012    PGMMODE                 enmShadowMode;
     1013
    10071014    /** The PAE PDPEs used with Nested Paging (only valid when
    10081015     *  VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
    10091016    X86PDPE                 aPdpes[4];
    1010 
    1011     /** Current shadow paging mode for updating CR4. */
    1012     PGMMODE                 enmShadowMode;
    1013 
    1014     /** The CPU ID of the CPU currently owning the VMCS. Set in
    1015      * HMR0Enter and cleared in HMR0Leave. */
    1016     RTCPUID                 idEnteredCpu;
    10171017
    10181018    /** For saving stack space, the disassembler state is allocated here instead of
     
    11661166/** Pointer to HM VMCPU instance data. */
    11671167typedef HMCPU *PHMCPU;
     1168AssertCompileMemberAlignment(HMCPU, fCheckedTLBFlush,  4);
     1169AssertCompileMemberAlignment(HMCPU, fForceTLBFlush,    4);
    11681170AssertCompileMemberAlignment(HMCPU, cWorldSwitchExits, 4);
    1169 AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);
     1171AssertCompileMemberAlignment(HMCPU, fCtxChanged,       8);
    11701172AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx, 8);
     1173AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfo,       8);
     1174AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfoNstGst, 8);
     1175AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.RestoreHost,    8);
    11711176AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) svm, 8);
    11721177AssertCompileMemberAlignment(HMCPU, Event, 8);
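
The AssertCompileMemberAlignment checks above pin the cache-line-conscious layout at build time. In plain C11 the equivalent is a _Static_assert over offsetof, roughly (MyStruct and its members are illustrative):

    #include <stddef.h>   /* offsetof */
    #include <stdint.h>

    typedef struct { uint32_t a; uint64_t b; } MyStruct;

    /* Equivalent of AssertCompileMemberAlignment(MyStruct, b, 8): fail the
       build if 'b' is not 8-byte aligned within the struct. */
    _Static_assert(offsetof(MyStruct, b) % 8 == 0, "MyStruct.b must be 8-byte aligned");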
  • trunk/src/VBox/VMM/include/HMInternal.mac

    r80150 r81637  
    1717
    1818struc HMCPU
    19     .fCheckedTLBFlush       resb    1
    20     .fForceTLBFlush         resb    1
    21     .fActive                resb    1
    22     .fLeaveDone             resb    1
    23     .fUsingHyperDR7         resb    1
    24     .fLoadSaveGuestXcr0     resb    1
     19    .fCheckedTLBFlush               resb    1
     20    .fActive                        resb    1
     21    .fLeaveDone                     resb    1
     22    .fUsingHyperDR7                 resb    1
     23    .fForceTLBFlush                 resb    1
     24    .fUseDebugLoop                  resb    1
     25    .fUsingDebugLoop                resb    1
     26    .fDebugWantRdTscExit            resb    1
    2527
    26     .fUseDebugLoop          resb    1
    27     .fUsingDebugLoop        resb    1
    28     .fDebugWantRdTscExit    resb    1
    29     .fSingleInstruction     resb    1
    30     .fClearTrapFlag         resb    1
    31 
    32     .fGIMTrapXcptUD         resb    1
    33     .fTrapXcptGpForLovelyMesaDrv resb 1
     28    .fLoadSaveGuestXcr0             resb    1
     29    .fGIMTrapXcptUD                 resb    1
     30    .fTrapXcptGpForLovelyMesaDrv    resb    1
     31    .fSingleInstruction             resb    1
     32    .fClearTrapFlag                 resb    1
    3433    alignb 8
    3534
    36     .cWorldSwitchExits      resd    1
    37     .idLastCpu              resd    1
    38     .cTlbFlushes            resd    1
    39     .uCurrentAsid           resd    1
    40     .u32HMError             resd    1
    41     alignb 8
    42     .fCtxChanged            resq    1
     35    .cWorldSwitchExits              resd    1
     36    .idLastCpu                      resd    1
     37    .cTlbFlushes                    resd    1
     38    .uCurrentAsid                   resd    1
     39    .u32HMError                     resd    1
     40    .rcLastExitToR3                 resd    1
     41    .fCtxChanged                    resq    1
    4342
    4443    ; incomplete to save unnecessary pain...
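
Since the NASM struc is a hand-maintained mirror of the C HMCPU layout, a common safeguard (hypothetical here, not necessarily what VBox does) is to break the C build when an offset the assembly relies on drifts:

    AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);  /* matches the .fCtxChanged resq slot */
    AssertCompile(sizeof(bool) == 1);                     /* each 'resb 1' pairs with a C bool */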