VirtualBox

Timestamp:
Jan 6, 2022 12:38:02 PM
Author:
vboxsync
Message:

VMM,{HMVMXR0.cpp,VMXTemplate.cpp.h}: Make use of the VMX template code in HM, getting rid of the temporary code duplication, bugref:10136

File:
1 edited

Legend:

Unmodified lines carry no prefix; removed lines are prefixed with "-", added lines with "+".
  • trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h

--- trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h (r93115)
+++ trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h (r93132)
@@ -127 +127 @@
 #endif

-#ifdef IN_RING0
+#ifndef IN_NEM_DARWIN
 /** Assert that preemption is disabled or covered by thread-context hooks. */
 # define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)          Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu))   \
     
@@ -743 +743 @@


-#ifdef IN_RING0
-/**
- * Checks if the given MSR is part of the lastbranch-from-IP MSR stack.
- * @returns @c true if it's part of the LBR stack, @c false otherwise.
- *
- * @param   pVM         The cross context VM structure.
- * @param   idMsr       The MSR.
- * @param   pidxMsr     Where to store the index of the MSR in the LBR MSR array.
- *                      Optional, can be NULL.
- *
- * @remarks Must only be called when LBR is enabled.
- */
-DECL_FORCE_INLINE(bool) vmxHCIsLbrBranchFromMsr(PCVMCC pVM, uint32_t idMsr, uint32_t *pidxMsr)
-{
-    Assert(VM_IS_VMX_LBR(pVM));
-    Assert(pVM->hmr0.s.vmx.idLbrFromIpMsrFirst);
-    uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrFromIpMsrLast - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst + 1;
-    uint32_t const idxMsr    = idMsr - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst;
-    if (idxMsr < cLbrStack)
-    {
-        if (pidxMsr)
-            *pidxMsr = idxMsr;
-        return true;
-    }
-    return false;
-}
-
-
-/**
- * Checks if the given MSR is part of the lastbranch-to-IP MSR stack.
- * @returns @c true if it's part of the LBR stack, @c false otherwise.
- *
- * @param   pVM         The cross context VM structure.
- * @param   idMsr       The MSR.
- * @param   pidxMsr     Where to store the index of the MSR in the LBR MSR array.
- *                      Optional, can be NULL.
- *
- * @remarks Must only be called when LBR is enabled and when lastbranch-to-IP MSRs
- *          are supported by the CPU (see vmxHCSetupLbrMsrRange).
- */
-DECL_FORCE_INLINE(bool) vmxHCIsLbrBranchToMsr(PCVMCC pVM, uint32_t idMsr, uint32_t *pidxMsr)
-{
-    Assert(VM_IS_VMX_LBR(pVM));
-    if (pVM->hmr0.s.vmx.idLbrToIpMsrFirst)
-    {
-        uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrToIpMsrLast - pVM->hmr0.s.vmx.idLbrToIpMsrFirst + 1;
-        uint32_t const idxMsr    = idMsr - pVM->hmr0.s.vmx.idLbrToIpMsrFirst;
-        if (idxMsr < cLbrStack)
-        {
-            if (pidxMsr)
-                *pidxMsr = idxMsr;
-            return true;
-        }
-    }
-    return false;
-}
-#endif
-
-
 /**
  * Gets the CR0 guest/host mask.
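These two predicates let MSR intercept handlers map a raw MSR index back to a slot in the LBR stack. A minimal sketch of the intended use in a RDMSR intercept; the surrounding handler context here is assumed for illustration, not taken from this changeset:

    /* Illustrative: resolve a guest RDMSR of an LBR MSR to the cached value. */
    uint32_t idxMsr;
    if (vmxHCIsLbrBranchFromMsr(pVM, idMsr, &idxMsr))
        uGuestValue = pVmcsInfo->au64LbrFromIpMsr[idxMsr];  /* cached branch-from IP */
    else if (vmxHCIsLbrBranchToMsr(pVM, idMsr, &idxMsr))
        uGuestValue = pVmcsInfo->au64LbrToIpMsr[idxMsr];    /* cached branch-to IP */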
     
@@ -888 +829 @@

 /**
- * Returns whether the VM-exit MSR-store area differs from the VM-exit MSR-load
- * area.
- *
- * @returns @c true if it's different, @c false otherwise.
- * @param   pVmcsInfo   The VMCS info. object.
- */
-DECL_FORCE_INLINE(bool) vmxHCIsSeparateExitMsrStoreAreaVmcs(PCVMXVMCSINFO pVmcsInfo)
-{
-    return RT_BOOL(   pVmcsInfo->pvGuestMsrStore != pVmcsInfo->pvGuestMsrLoad
-                   && pVmcsInfo->pvGuestMsrStore);
-}
-
-#ifdef IN_RING0
-/**
- * Sets the given Processor-based VM-execution controls.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmxTransient   The VMX-transient structure.
- * @param   uProcCtls       The Processor-based VM-execution controls to set.
- */
-static void vmxHCSetProcCtlsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uProcCtls)
-{
-    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
-    if ((pVmcsInfo->u32ProcCtls & uProcCtls) != uProcCtls)
-    {
-        pVmcsInfo->u32ProcCtls |= uProcCtls;
-        int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
-        AssertRC(rc);
-    }
-}
-
-
-/**
- * Removes the given Processor-based VM-execution controls.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmxTransient   The VMX-transient structure.
- * @param   uProcCtls       The Processor-based VM-execution controls to remove.
- *
- * @remarks When executing a nested-guest, this will not remove any of the specified
- *          controls if the nested hypervisor has set any one of them.
- */
-static void vmxHCRemoveProcCtlsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uProcCtls)
-{
-    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
-    if (pVmcsInfo->u32ProcCtls & uProcCtls)
-    {
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-        if (   !pVmxTransient->fIsNestedGuest
-            || !CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uProcCtls))
-#else
-        NOREF(pVCpu);
-        if (!pVmxTransient->fIsNestedGuest)
-#endif
-        {
-            pVmcsInfo->u32ProcCtls &= ~uProcCtls;
-            int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
-            AssertRC(rc);
-        }
-    }
-}
-
-
-/**
- * Sets the TSC offset for the current VMCS.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmcsInfo       The VMCS info. object.
- * @param   uTscOffset      The TSC offset to set.
- */
-static void vmxHCSetTscOffsetVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t uTscOffset)
-{
-    if (pVmcsInfo->u64TscOffset != uTscOffset)
-    {
-        int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
-        AssertRC(rc);
-        pVmcsInfo->u64TscOffset = uTscOffset;
-    }
-}
-#endif
-
-/**
  * Adds one or more exceptions to the exception bitmap and commits it to the current
  * VMCS.
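The removed set/remove helpers cache the control word in pVmcsInfo->u32ProcCtls so a VMWRITE is only issued when a bit actually changes, and the remove path defers to the nested hypervisor's settings. A hedged sketch of the call pattern, using MOV-DRx interception purely as an illustration:

    /* Illustrative: toggle MOV DRx interception without redundant VMWRITEs. */
    vmxHCRemoveProcCtlsVmcs(pVCpu, pVmxTransient, VMX_PROC_CTLS_MOV_DR_EXIT); /* guest owns DRx */
    vmxHCSetProcCtlsVmcs(pVCpu, pVmxTransient, VMX_PROC_CTLS_MOV_DR_EXIT);    /* intercept again */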
     
@@ -1046 +905 @@
         {
             /* Validate we are not removing any essential exception intercepts. */
-#ifdef IN_RING0
+#ifndef IN_NEM_DARWIN
             Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
 #else
     
@@ -1144 +1003 @@
     if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
     {
-        int rc = vmxHCClearVmcs(pVmcsInfoFrom);
+        int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
         if (RT_SUCCESS(rc))
         {
     
@@ -1167 +1026 @@
     if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
     {
-        int rc = vmxHCClearVmcs(pVmcsInfoTo);
+        int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
         if (RT_SUCCESS(rc))
         { /* likely */ }
     
@@ -1177 +1036 @@
      * Finally, load the VMCS we are switching to.
      */
-    return vmxHCLoadVmcs(pVmcsInfoTo);
+    return hmR0VmxLoadVmcs(pVmcsInfoTo);
 }

     
@@ -1218 +1077 @@
     {
         pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs           = fSwitchToNstGstVmcs;
-        VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
+        pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;

         /*
     
@@ -1251 +1110 @@
 }
 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
-
-
-#ifdef IN_RING0
-/**
- * Updates the VM's last error record.
- *
- * If there was a VMX instruction error, reads the error data from the VMCS and
- * updates the VCPU's last error record as well.
- *
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- *                  Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
- *                  VERR_VMX_INVALID_VMCS_FIELD.
- * @param   rc      The error code.
- */
-static void vmxHCUpdateErrorRecord(PVMCPUCC pVCpu, int rc)
-{
-    if (   rc == VERR_VMX_INVALID_VMCS_FIELD
-        || rc == VERR_VMX_UNABLE_TO_START_VM)
-    {
-        AssertPtrReturnVoid(pVCpu);
-        VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32InstrError);
-    }
-    pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc;
-}
-#endif


     
@@ -1531 +1365 @@
 #endif

-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-/**
- * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not.
- *
- * @returns @c true if the MSR is intercepted, @c false otherwise.
- * @param   pbMsrBitmap     The MSR bitmap.
- * @param   offMsr          The MSR byte offset.
- * @param   iBit            The bit offset from the byte offset.
- */
-DECLINLINE(bool) vmxHCIsMsrBitSet(uint8_t const *pbMsrBitmap, uint16_t offMsr, int32_t iBit)
-{
-    Assert(offMsr + (iBit >> 3) <= X86_PAGE_4K_SIZE);
-    return ASMBitTest(pbMsrBitmap + offMsr, iBit);
-}
-#endif
-
-#ifdef IN_RING0
-/**
- * Sets the permission bits for the specified MSR in the given MSR bitmap.
- *
- * If the passed VMCS is a nested-guest VMCS, this function ensures that the
- * read/write intercept is cleared from the MSR bitmap used for hardware-assisted
- * VMX execution of the nested-guest, only if the nested-guest is also not
- * intercepting the read/write access of this MSR.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmcsInfo       The VMCS info. object.
- * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
- * @param   idMsr           The MSR value.
- * @param   fMsrpm          The MSR permissions (see VMXMSRPM_XXX). This must
- *                          include both a read -and- a write permission!
- *
- * @sa      CPUMGetVmxMsrPermission.
- * @remarks Can be called with interrupts disabled.
- */
-static void vmxHCSetMsrPermission(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
-{
-    uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
-    Assert(pbMsrBitmap);
-    Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm));
-
-    /*
-     * MSR-bitmap Layout:
-     *   Byte index            MSR range            Interpreted as
-     * 0x000 - 0x3ff    0x00000000 - 0x00001fff    Low MSR read bits.
-     * 0x400 - 0x7ff    0xc0000000 - 0xc0001fff    High MSR read bits.
-     * 0x800 - 0xbff    0x00000000 - 0x00001fff    Low MSR write bits.
-     * 0xc00 - 0xfff    0xc0000000 - 0xc0001fff    High MSR write bits.
-     *
-     * A bit corresponding to an MSR within the above range causes a VM-exit
-     * if the bit is 1 on executions of RDMSR/WRMSR.  If an MSR falls out of
-     * the MSR range, it always causes a VM-exit.
-     *
-     * See Intel spec. 24.6.9 "MSR-Bitmap Address".
-     */
-    uint16_t const offBitmapRead  = 0;
-    uint16_t const offBitmapWrite = 0x800;
-    uint16_t       offMsr;
-    int32_t        iBit;
-    if (idMsr <= UINT32_C(0x00001fff))
-    {
-        offMsr = 0;
-        iBit   = idMsr;
-    }
-    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
-    {
-        offMsr = 0x400;
-        iBit   = idMsr - UINT32_C(0xc0000000);
-    }
-    else
-        AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr));
-
-    /*
-     * Set the MSR read permission.
-     */
-    uint16_t const offMsrRead = offBitmapRead + offMsr;
-    Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
-    if (fMsrpm & VMXMSRPM_ALLOW_RD)
-    {
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-        bool const fClear = !fIsNstGstVmcs ? true
-                          : !vmxHCIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrRead, iBit);
-#else
-        RT_NOREF2(pVCpu, fIsNstGstVmcs);
-        bool const fClear = true;
-#endif
-        if (fClear)
-            ASMBitClear(pbMsrBitmap + offMsrRead, iBit);
-    }
-    else
-        ASMBitSet(pbMsrBitmap + offMsrRead, iBit);
-
-    /*
-     * Set the MSR write permission.
-     */
-    uint16_t const offMsrWrite = offBitmapWrite + offMsr;
-    Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
-    if (fMsrpm & VMXMSRPM_ALLOW_WR)
-    {
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-        bool const fClear = !fIsNstGstVmcs ? true
-                          : !vmxHCIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrWrite, iBit);
-#else
-        RT_NOREF2(pVCpu, fIsNstGstVmcs);
-        bool const fClear = true;
-#endif
-        if (fClear)
-            ASMBitClear(pbMsrBitmap + offMsrWrite, iBit);
-    }
-    else
-        ASMBitSet(pbMsrBitmap + offMsrWrite, iBit);
-}
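To make the layout concrete, a worked example (values illustrative, not part of the change): MSR_K8_LSTAR is 0xc0000082, a "high" MSR, so offMsr = 0x400 and iBit = 0x82. Its read-permission bit therefore lives at byte 0x000 + 0x400 + (0x82 >> 3) = 0x410, bit 0x82 & 7 = 2, and its write-permission bit at byte 0x800 + 0x400 + 0x10 = 0xc10, bit 2:

    /* Illustrative: locate the permission bits for MSR_K8_LSTAR (0xc0000082). */
    uint32_t const idMsr  = UINT32_C(0xc0000082);
    uint16_t const offMsr = 0x400;                           /* high MSR range   */
    int32_t  const iBit   = idMsr - UINT32_C(0xc0000000);    /* 0x82             */
    /* Read intercept:  byte 0x410, bit 2.  Write intercept: byte 0xc10, bit 2. */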
-
-
-/**
- * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
- * area.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pVmcsInfo   The VMCS info. object.
- * @param   cMsrs       The number of MSRs.
- */
-static int vmxHCSetAutoLoadStoreMsrCount(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs)
-{
-    /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
-    uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc);
-    if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
-    {
-        /* Commit the MSR counts to the VMCS and update the cache. */
-        if (pVmcsInfo->cEntryMsrLoad != cMsrs)
-        {
-            int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);   AssertRC(rc);
-            rc     = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);   AssertRC(rc);
-            rc     = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);   AssertRC(rc);
-            pVmcsInfo->cEntryMsrLoad = cMsrs;
-            pVmcsInfo->cExitMsrStore = cMsrs;
-            pVmcsInfo->cExitMsrLoad  = cMsrs;
-        }
-        return VINF_SUCCESS;
-    }
-
-    LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
-    VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
-    return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-}
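The cap comes from the IA32_VMX_MISC MSR: per the Intel SDM, bits 27:25 hold a value N, and the recommended maximum number of MSRs in each auto-load/store list is 512 * (N + 1). A sketch of what VMX_MISC_MAX_MSRS presumably computes; this expansion is an assumption based on the SDM, not taken from the VBox headers:

    /* Assumed per Intel SDM: N = IA32_VMX_MISC[27:25], cap = 512 * (N + 1). */
    uint32_t const cMax = 512 * ((uint32_t)((g_HmMsrs.u.vmx.u64Misc >> 25) & 7) + 1); /* N=0 -> 512 */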
-
-
-/**
- * Adds a new (or updates the value of an existing) guest/host MSR
- * pair to be swapped during the world-switch as part of the
- * auto-load/store MSR area in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmxTransient   The VMX-transient structure.
- * @param   idMsr           The MSR.
- * @param   uGuestMsrValue  Value of the guest MSR.
- * @param   fSetReadWrite   Whether to set the guest read/write access of this
- *                          MSR (thus not causing a VM-exit).
- * @param   fUpdateHostMsr  Whether to update the value of the host MSR if
- *                          necessary.
- */
-static int vmxHCAddAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
-                                    bool fSetReadWrite, bool fUpdateHostMsr)
-{
-    PVMXVMCSINFO    pVmcsInfo     = pVmxTransient->pVmcsInfo;
-    bool const      fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
-    PVMXAUTOMSR     pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
-    uint32_t        cMsrs         = pVmcsInfo->cEntryMsrLoad;
-    uint32_t        i;
-
-    /* Paranoia. */
-    Assert(pGuestMsrLoad);
-
-#ifndef DEBUG_bird
-    LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
-#endif
-
-    /* Check if the MSR already exists in the VM-entry MSR-load area. */
-    for (i = 0; i < cMsrs; i++)
-    {
-        if (pGuestMsrLoad[i].u32Msr == idMsr)
-            break;
-    }
-
-    bool fAdded = false;
-    if (i == cMsrs)
-    {
-        /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
-        ++cMsrs;
-        int rc = vmxHCSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
-        AssertMsgRCReturn(rc, ("Insufficient space to add MSR to VM-entry MSR-load/store area %u\n", idMsr), rc);
-
-        /* Set the guest to read/write this MSR without causing VM-exits. */
-        if (   fSetReadWrite
-            && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
-            vmxHCSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD_WR);
-
-        Log4Func(("Added MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
-        fAdded = true;
-    }
-
-    /* Update the MSR value for the newly added or already existing MSR. */
-    pGuestMsrLoad[i].u32Msr   = idMsr;
-    pGuestMsrLoad[i].u64Value = uGuestMsrValue;
-
-    /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
-    if (vmxHCIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
-    {
-        PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
-        pGuestMsrStore[i].u32Msr   = idMsr;
-        pGuestMsrStore[i].u64Value = uGuestMsrValue;
-    }
-
-    /* Update the corresponding slot in the host MSR area. */
-    PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
-    Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
-    Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
-    pHostMsr[i].u32Msr = idMsr;
-
-    /*
-     * Only if the caller requests to update the host MSR value AND we've newly added the
-     * MSR to the host MSR area do we actually update the value. Otherwise, it will be
-     * updated by vmxHCUpdateAutoLoadHostMsrs().
-     *
-     * We do this for performance reasons since reading MSRs may be quite expensive.
-     */
-    if (fAdded)
-    {
-        if (fUpdateHostMsr)
-        {
-            Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-            pHostMsr[i].u64Value = ASMRdMsr(idMsr);
-        }
-        else
-        {
-            /* Someone else can do the work. */
-            pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
-        }
-    }
-
-    return VINF_SUCCESS;
-}
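As the MSR-permission setup below notes for IA32_SPEC_CTRL, stateful MSRs are swapped across the world-switch via this area. A hedged sketch of a typical call; the variable uGuestSpecCtrl is illustrative, not part of this changeset:

    /* Illustrative: swap guest/host IA32_SPEC_CTRL across the world-switch, letting
       the guest access it without VM-exits and deferring the host-value RDMSR. */
    int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL,
                                      uGuestSpecCtrl, true /*fSetReadWrite*/,
                                      false /*fUpdateHostMsr*/);
    AssertRC(rc);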
-
-
-/**
- * Removes a guest/host MSR pair to be swapped during the world-switch from the
- * auto-load/store MSR area in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmxTransient   The VMX-transient structure.
- * @param   idMsr           The MSR.
- */
-static int vmxHCRemoveAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr)
-{
-    PVMXVMCSINFO    pVmcsInfo     = pVmxTransient->pVmcsInfo;
-    bool const      fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
-    PVMXAUTOMSR     pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
-    uint32_t        cMsrs         = pVmcsInfo->cEntryMsrLoad;
-
-#ifndef DEBUG_bird
-    LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
-#endif
-
-    for (uint32_t i = 0; i < cMsrs; i++)
-    {
-        /* Find the MSR. */
-        if (pGuestMsrLoad[i].u32Msr == idMsr)
-        {
-            /*
-             * If it's the last MSR, we only need to reduce the MSR count.
-             * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
-             */
-            if (i < cMsrs - 1)
-            {
-                /* Remove it from the VM-entry MSR-load area. */
-                pGuestMsrLoad[i].u32Msr   = pGuestMsrLoad[cMsrs - 1].u32Msr;
-                pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
-
-                /* Remove it from the VM-exit MSR-store area if it's in a different page. */
-                if (vmxHCIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
-                {
-                    PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
-                    Assert(pGuestMsrStore[i].u32Msr == idMsr);
-                    pGuestMsrStore[i].u32Msr   = pGuestMsrStore[cMsrs - 1].u32Msr;
-                    pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
-                }
-
-                /* Remove it from the VM-exit MSR-load area. */
-                PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
-                Assert(pHostMsr[i].u32Msr == idMsr);
-                pHostMsr[i].u32Msr   = pHostMsr[cMsrs - 1].u32Msr;
-                pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
-            }
-
-            /* Reduce the count to reflect the removed MSR and bail. */
-            --cMsrs;
-            break;
-        }
-    }
-
-    /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
-    if (cMsrs != pVmcsInfo->cEntryMsrLoad)
-    {
-        int rc = vmxHCSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
-        AssertRCReturn(rc, rc);
-
-        /* We're no longer swapping MSRs during the world-switch; intercept guest read/writes to them. */
-        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
-            vmxHCSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
-
-        Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
-        return VINF_SUCCESS;
-    }
-
-    return VERR_NOT_FOUND;
-}
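Removal uses the classic swap-with-last idiom: because the MSR lists are unordered arrays, the last entry can overwrite the removed slot and the count shrinks by one, keeping removal O(1). The generic shape of the idiom, standalone and purely illustrative:

    /* Generic swap-with-last removal from an unordered array (illustrative). */
    if (i < cEntries - 1)
        paEntries[i] = paEntries[cEntries - 1];  /* overwrite slot with the last entry */
    --cEntries;                                  /* shrink; element order is not preserved */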
-
-
-/**
- * Checks if the specified guest MSR is part of the VM-entry MSR-load area.
- *
- * @returns @c true if found, @c false otherwise.
- * @param   pVmcsInfo   The VMCS info. object.
- * @param   idMsr       The MSR to find.
- */
-static bool vmxHCIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
-{
-    PCVMXAUTOMSR   pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
-    uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
-    Assert(pMsrs);
-    Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
-    for (uint32_t i = 0; i < cMsrs; i++)
-    {
-        if (pMsrs[i].u32Msr == idMsr)
-            return true;
-    }
-    return false;
-}
-#endif
-
-
 /**
  * Verifies that our cached values of the VMCS fields are all consistent with
     
@@ -1960 +1449 @@
     return VINF_SUCCESS;
 }
-
-
-#ifdef IN_RING0
-/**
- * Sets up the LBR MSR ranges based on the host CPU.
- *
- * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
- */
-static int vmxHCSetupLbrMsrRange(PVMCC pVM)
-{
-    Assert(VM_IS_VMX_LBR(pVM));
-    uint32_t idLbrFromIpMsrFirst;
-    uint32_t idLbrFromIpMsrLast;
-    uint32_t idLbrToIpMsrFirst;
-    uint32_t idLbrToIpMsrLast;
-    uint32_t idLbrTosMsr;
-
-    /*
-     * Determine the LBR MSRs supported for this host CPU family and model.
-     *
-     * See Intel spec. 17.4.8 "LBR Stack".
-     * See Intel "Model-Specific Registers" spec.
-     */
-    uint32_t const uFamilyModel = (pVM->cpum.ro.HostFeatures.uFamily << 8)
-                                | pVM->cpum.ro.HostFeatures.uModel;
-    switch (uFamilyModel)
-    {
-        case 0x0f01: case 0x0f02:
-            idLbrFromIpMsrFirst = MSR_P4_LASTBRANCH_0;
-            idLbrFromIpMsrLast  = MSR_P4_LASTBRANCH_3;
-            idLbrToIpMsrFirst   = 0x0;
-            idLbrToIpMsrLast    = 0x0;
-            idLbrTosMsr         = MSR_P4_LASTBRANCH_TOS;
-            break;
-
-        case 0x065c: case 0x065f: case 0x064e: case 0x065e: case 0x068e:
-        case 0x069e: case 0x0655: case 0x0666: case 0x067a: case 0x0667:
-        case 0x066a: case 0x066c: case 0x067d: case 0x067e:
-            idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
-            idLbrFromIpMsrLast  = MSR_LASTBRANCH_31_FROM_IP;
-            idLbrToIpMsrFirst   = MSR_LASTBRANCH_0_TO_IP;
-            idLbrToIpMsrLast    = MSR_LASTBRANCH_31_TO_IP;
-            idLbrTosMsr         = MSR_LASTBRANCH_TOS;
-            break;
-
-        case 0x063d: case 0x0647: case 0x064f: case 0x0656: case 0x063c:
-        case 0x0645: case 0x0646: case 0x063f: case 0x062a: case 0x062d:
-        case 0x063a: case 0x063e: case 0x061a: case 0x061e: case 0x061f:
-        case 0x062e: case 0x0625: case 0x062c: case 0x062f:
-            idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
-            idLbrFromIpMsrLast  = MSR_LASTBRANCH_15_FROM_IP;
-            idLbrToIpMsrFirst   = MSR_LASTBRANCH_0_TO_IP;
-            idLbrToIpMsrLast    = MSR_LASTBRANCH_15_TO_IP;
-            idLbrTosMsr         = MSR_LASTBRANCH_TOS;
-            break;
-
-        case 0x0617: case 0x061d: case 0x060f:
-            idLbrFromIpMsrFirst = MSR_CORE2_LASTBRANCH_0_FROM_IP;
-            idLbrFromIpMsrLast  = MSR_CORE2_LASTBRANCH_3_FROM_IP;
-            idLbrToIpMsrFirst   = MSR_CORE2_LASTBRANCH_0_TO_IP;
-            idLbrToIpMsrLast    = MSR_CORE2_LASTBRANCH_3_TO_IP;
-            idLbrTosMsr         = MSR_CORE2_LASTBRANCH_TOS;
-            break;
-
-        /* Atom and related microarchitectures we don't care about:
-        case 0x0637: case 0x064a: case 0x064c: case 0x064d: case 0x065a:
-        case 0x065d: case 0x061c: case 0x0626: case 0x0627: case 0x0635:
-        case 0x0636: */
-        /* All other CPUs: */
-        default:
-        {
-            LogRelFunc(("Could not determine LBR stack size for the CPU model %#x\n", uFamilyModel));
-            VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_UNKNOWN;
-            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-        }
-    }
-
-    /*
-     * Validate.
-     */
-    uint32_t const cLbrStack = idLbrFromIpMsrLast - idLbrFromIpMsrFirst + 1;
-    PCVMCPU pVCpu0 = VMCC_GET_CPU_0(pVM);
-    AssertCompile(   RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrFromIpMsr)
-                  == RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrToIpMsr));
-    if (cLbrStack > RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrFromIpMsr))
-    {
-        LogRelFunc(("LBR stack size of the CPU (%u) exceeds our buffer size\n", cLbrStack));
-        VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_OVERFLOW;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-    NOREF(pVCpu0);
-
-    /*
-     * Update the LBR info. to the VM struct. for use later.
-     */
-    pVM->hmr0.s.vmx.idLbrTosMsr = idLbrTosMsr;
-
-    pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
-    pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast  = pVM->hmr0.s.vmx.idLbrFromIpMsrLast  = idLbrFromIpMsrLast;
-
-    pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst   = pVM->hmr0.s.vmx.idLbrToIpMsrFirst   = idLbrToIpMsrFirst;
-    pVM->hm.s.ForR3.vmx.idLbrToIpMsrLast    = pVM->hmr0.s.vmx.idLbrToIpMsrLast    = idLbrToIpMsrLast;
-    return VINF_SUCCESS;
-}
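The switch keys on a packed (family << 8) | model value. For example, family 6 model 0x9e packs to 0x069e, which the second case maps to the 32-entry MSR_LASTBRANCH_0..31 from/to-IP stacks, while family 0xf model 0x02 packs to 0x0f02 and selects the 4-entry Pentium 4 stack that has no to-IP MSRs:

    /* Illustrative packing, mirroring the switch above. */
    uint32_t const uFamilyModel = (UINT32_C(6) << 8) | 0x9e;   /* = 0x069e -> 32-deep LBR stack */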
-
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-/**
- * Sets up the shadow VMCS fields arrays.
- *
- * This function builds arrays of VMCS fields to sync the shadow VMCS later while
- * executing the guest.
- *
- * @returns VBox status code.
- * @param   pVM     The cross context VM structure.
- */
-static int vmxHCSetupShadowVmcsFieldsArrays(PVMCC pVM)
-{
-    /*
-     * Paranoia. Ensure we haven't exposed the VMWRITE-All VMX feature to the guest
-     * when the host does not support it.
-     */
-    bool const fGstVmwriteAll = pVM->cpum.ro.GuestFeatures.fVmxVmwriteAll;
-    if (   !fGstVmwriteAll
-        || (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL))
-    { /* likely. */ }
-    else
-    {
-        LogRelFunc(("VMX VMWRITE-All feature exposed to the guest but host CPU does not support it!\n"));
-        VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_GST_HOST_VMWRITE_ALL;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-
-    uint32_t const cVmcsFields = RT_ELEMENTS(g_aVmcsFields);
-    uint32_t       cRwFields   = 0;
-    uint32_t       cRoFields   = 0;
-    for (uint32_t i = 0; i < cVmcsFields; i++)
-    {
-        VMXVMCSFIELD VmcsField;
-        VmcsField.u = g_aVmcsFields[i];
-
-        /*
-         * We will be writing "FULL" (64-bit) fields while syncing the shadow VMCS.
-         * Therefore, "HIGH" (32-bit portion of 64-bit) fields must not be included
-         * in the shadow VMCS fields array as they would be redundant.
-         *
-         * If the VMCS field depends on a CPU feature that is not exposed to the guest,
-         * we must not include it in the shadow VMCS fields array. Guests attempting to
-         * VMREAD/VMWRITE such VMCS fields would cause a VM-exit and we shall emulate
-         * the required behavior.
-         */
-        if (   VmcsField.n.fAccessType == VMX_VMCSFIELD_ACCESS_FULL
-            && CPUMIsGuestVmxVmcsFieldValid(pVM, VmcsField.u))
-        {
-            /*
-             * Read-only fields are placed in a separate array so that while syncing shadow
-             * VMCS fields later (which is more performance critical) we can avoid branches.
-             *
-             * However, if the guest can write to all fields (including read-only fields),
-             * we treat it as a read/write field. Otherwise, writing to these fields would
-             * cause a VMWRITE instruction error while syncing the shadow VMCS.
-             */
-            if (   fGstVmwriteAll
-                || !VMXIsVmcsFieldReadOnly(VmcsField.u))
-                pVM->hmr0.s.vmx.paShadowVmcsFields[cRwFields++] = VmcsField.u;
-            else
-                pVM->hmr0.s.vmx.paShadowVmcsRoFields[cRoFields++] = VmcsField.u;
-        }
-    }
-
-    /* Update the counts. */
-    pVM->hmr0.s.vmx.cShadowVmcsFields   = cRwFields;
-    pVM->hmr0.s.vmx.cShadowVmcsRoFields = cRoFields;
-    return VINF_SUCCESS;
-}
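The fAccessType test relies on the VMCS field-encoding layout from the Intel SDM: bit 0 is the access type (0 = full, 1 = high; meaningful for 64-bit fields) and bits 14:13 give the field width. A hand-decode of one encoding as an illustration, independent of VBox's VMXVMCSFIELD bitfield layout:

    /* 0x2802 (GUEST_DEBUGCTL, full access): bit 0 = 0 (FULL), bits 14:13 = 01 (64-bit). */
    uint32_t const uEncoding = 0x2802;
    bool     const fHigh     = (uEncoding & 1) != 0;     /* false -> FULL access     */
    uint32_t const uWidth    = (uEncoding >> 13) & 3;    /* 1 -> 64-bit field width  */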
-
-
-/**
- * Sets up the VMREAD and VMWRITE bitmaps.
- *
- * @param   pVM     The cross context VM structure.
- */
-static void vmxHCSetupVmreadVmwriteBitmaps(PVMCC pVM)
-{
-    /*
-     * By default, ensure guest attempts to access any VMCS fields cause VM-exits.
-     */
-    uint32_t const cbBitmap        = X86_PAGE_4K_SIZE;
-    uint8_t       *pbVmreadBitmap  = (uint8_t *)pVM->hmr0.s.vmx.pvVmreadBitmap;
-    uint8_t       *pbVmwriteBitmap = (uint8_t *)pVM->hmr0.s.vmx.pvVmwriteBitmap;
-    ASMMemFill32(pbVmreadBitmap,  cbBitmap, UINT32_C(0xffffffff));
-    ASMMemFill32(pbVmwriteBitmap, cbBitmap, UINT32_C(0xffffffff));
-
-    /*
-     * Skip intercepting VMREAD/VMWRITE to guest read/write fields in the
-     * VMREAD and VMWRITE bitmaps.
-     */
-    {
-        uint32_t const *paShadowVmcsFields = pVM->hmr0.s.vmx.paShadowVmcsFields;
-        uint32_t const  cShadowVmcsFields  = pVM->hmr0.s.vmx.cShadowVmcsFields;
-        for (uint32_t i = 0; i < cShadowVmcsFields; i++)
-        {
-            uint32_t const uVmcsField = paShadowVmcsFields[i];
-            Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
-            Assert(uVmcsField >> 3 < cbBitmap);
-            ASMBitClear(pbVmreadBitmap  + (uVmcsField >> 3), uVmcsField & 7);
-            ASMBitClear(pbVmwriteBitmap + (uVmcsField >> 3), uVmcsField & 7);
-        }
-    }
-
-    /*
-     * Skip intercepting VMREAD for guest read-only fields in the VMREAD bitmap
-     * if the host supports VMWRITE to all supported VMCS fields.
-     */
-    if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
-    {
-        uint32_t const *paShadowVmcsRoFields = pVM->hmr0.s.vmx.paShadowVmcsRoFields;
-        uint32_t const  cShadowVmcsRoFields  = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
-        for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
-        {
-            uint32_t const uVmcsField = paShadowVmcsRoFields[i];
-            Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
-            Assert(uVmcsField >> 3 < cbBitmap);
-            ASMBitClear(pbVmreadBitmap + (uVmcsField >> 3), uVmcsField & 7);
-        }
-    }
-}
-#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
-
-
-/**
- * Sets up the APIC-access page address for the VMCS.
- *
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-DECLINLINE(void) vmxHCSetupVmcsApicAccessAddr(PVMCPUCC pVCpu)
-{
-    RTHCPHYS const HCPhysApicAccess = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysApicAccess;
-    Assert(HCPhysApicAccess != NIL_RTHCPHYS);
-    Assert(!(HCPhysApicAccess & 0xfff));                     /* Bits 11:0 MBZ. */
-    int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
-    AssertRC(rc);
-}
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-
-/**
- * Sets up the VMREAD bitmap address for the VMCS.
- *
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-DECLINLINE(void) vmxHCSetupVmcsVmreadBitmapAddr(PVMCPUCC pVCpu)
-{
-    RTHCPHYS const HCPhysVmreadBitmap = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysVmreadBitmap;
-    Assert(HCPhysVmreadBitmap != NIL_RTHCPHYS);
-    Assert(!(HCPhysVmreadBitmap & 0xfff));                     /* Bits 11:0 MBZ. */
-    int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL, HCPhysVmreadBitmap);
-    AssertRC(rc);
-}
-
-
-/**
- * Sets up the VMWRITE bitmap address for the VMCS.
- *
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-DECLINLINE(void) vmxHCSetupVmcsVmwriteBitmapAddr(PVMCPUCC pVCpu)
-{
-    RTHCPHYS const HCPhysVmwriteBitmap = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysVmwriteBitmap;
-    Assert(HCPhysVmwriteBitmap != NIL_RTHCPHYS);
-    Assert(!(HCPhysVmwriteBitmap & 0xfff));                     /* Bits 11:0 MBZ. */
-    int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL, HCPhysVmwriteBitmap);
-    AssertRC(rc);
-}
-
-#endif
-
-/**
- * Sets up MSR permissions in the MSR bitmap of a VMCS info. object.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmcsInfo       The VMCS info. object.
- */
-static void vmxHCSetupVmcsMsrPermissions(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
-{
-    Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
-
-    /*
-     * By default, ensure guest attempts to access any MSR cause VM-exits.
-     * This shall later be relaxed for specific MSRs as necessary.
-     *
-     * Note: For nested-guests, the entire bitmap will be merged prior to
-     * executing the nested-guest using hardware-assisted VMX and hence there
-     * is no need to perform this operation. See vmxHCMergeMsrBitmapNested.
-     */
-    Assert(pVmcsInfo->pvMsrBitmap);
-    ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff));
-
-    /*
-     * The guest can access the following MSRs (read, write) without causing
-     * VM-exits; they are loaded/stored automatically using fields in the VMCS.
-     */
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_CS,  VMXMSRPM_ALLOW_RD_WR);
-    vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_ESP, VMXMSRPM_ALLOW_RD_WR);
-    vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_EIP, VMXMSRPM_ALLOW_RD_WR);
-    vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_GS_BASE,        VMXMSRPM_ALLOW_RD_WR);
-    vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_FS_BASE,        VMXMSRPM_ALLOW_RD_WR);
-
-    /*
-     * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
-     * associated with them. We never need to intercept access (writes need to be
-     * executed without causing a VM-exit, reads will #GP fault anyway).
-     *
-     * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
-     * read/write them. We swap the guest/host MSR value using the
-     * auto-load/store MSR area.
-     */
-    if (pVM->cpum.ro.GuestFeatures.fIbpb)
-        vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_PRED_CMD,  VMXMSRPM_ALLOW_RD_WR);
-    if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
-        vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD_WR);
-    if (pVM->cpum.ro.GuestFeatures.fIbrs)
-        vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD_WR);
-
-    /*
-     * Allow full read/write access for the following MSRs (mandatory for VT-x)
-     * required for 64-bit guests.
-     */
-    if (pVM->hmr0.s.fAllow64BitGuests)
-    {
-        vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_LSTAR,          VMXMSRPM_ALLOW_RD_WR);
-        vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K6_STAR,           VMXMSRPM_ALLOW_RD_WR);
-        vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_SF_MASK,        VMXMSRPM_ALLOW_RD_WR);
-        vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_KERNEL_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
-    }
-
-    /*
-     * IA32_EFER MSR is always intercepted, see @bugref{9180#c37}.
-     */
-#ifdef VBOX_STRICT
-    Assert(pVmcsInfo->pvMsrBitmap);
-    uint32_t const fMsrpmEfer = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K6_EFER);
-    Assert(fMsrpmEfer == VMXMSRPM_EXIT_RD_WR);
-#endif
-}
-
-
-/**
- * Sets up pin-based VM-execution controls in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pVmcsInfo   The VMCS info. object.
- */
-static int vmxHCSetupVmcsPinCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
-{
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    uint32_t       fVal = g_HmMsrs.u.vmx.PinCtls.n.allowed0;      /* Bits set here must always be set. */
-    uint32_t const fZap = g_HmMsrs.u.vmx.PinCtls.n.allowed1;      /* Bits cleared here must always be cleared. */
-
-    fVal |= VMX_PIN_CTLS_EXT_INT_EXIT                        /* External interrupts cause a VM-exit. */
-         |  VMX_PIN_CTLS_NMI_EXIT;                           /* Non-maskable interrupts (NMIs) cause a VM-exit. */
-
-    if (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
-        fVal |= VMX_PIN_CTLS_VIRT_NMI;                       /* Use virtual NMIs and virtual-NMI blocking features. */
-
-    /* Enable the VMX-preemption timer. */
-    if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
-    {
-        Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
-        fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
-    }
-
-#if 0
-    /* Enable posted-interrupt processing. */
-    if (pVM->hm.s.fPostedIntrs)
-    {
-        Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1  & VMX_PIN_CTLS_POSTED_INT);
-        Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
-        fVal |= VMX_PIN_CTLS_POSTED_INT;
-    }
-#endif
-
-    if ((fVal & fZap) != fVal)
-    {
-        LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
-                    g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap));
-        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PIN_EXEC;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-
-    /* Commit it to the VMCS and update our cache. */
-    int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, fVal);
-    AssertRC(rc);
-    pVmcsInfo->u32PinCtls = fVal;
-
-    return VINF_SUCCESS;
-}
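The fVal/fZap pattern encodes the VMX capability-MSR convention: per the Intel SDM, the low half of a controls capability MSR (allowed0) has a 1 for every control bit that must be 1, and the high half (allowed1) has a 0 for every bit that must stay 0. The (fVal & fZap) != fVal check therefore catches any requested bit the CPU cannot set. A micro-example with made-up values:

    /* Made-up capability values for illustration only. */
    uint32_t const fAllowed0 = 0x00000016;   /* bits that must always be 1 */
    uint32_t const fAllowed1 = 0x0000ffff;   /* bits that may be 1         */
    uint32_t const fGood = fAllowed0 | 0x0800;   /* bit 11 allowed: (fGood & fAllowed1) == fGood */
    uint32_t const fBad  = fAllowed0 | 0x10000;  /* bit 16 not allowed: the check fails          */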
-
-
-/**
- * Sets up secondary processor-based VM-execution controls in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pVmcsInfo   The VMCS info. object.
- */
-static int vmxHCSetupVmcsProcCtls2(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
-{
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    uint32_t       fVal = g_HmMsrs.u.vmx.ProcCtls2.n.allowed0;    /* Bits set here must be set in the VMCS. */
-    uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
-
-    /* WBINVD causes a VM-exit. */
-    if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
-        fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
-
-    /* Enable EPT (aka nested-paging). */
-    if (VM_IS_VMX_NESTED_PAGING(pVM))
-        fVal |= VMX_PROC_CTLS2_EPT;
-
-    /* Enable the INVPCID instruction if we expose it to the guest and it is supported
-       by the hardware. Without this, a guest executing INVPCID would cause a #UD. */
-    if (   pVM->cpum.ro.GuestFeatures.fInvpcid
-        && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID))
-        fVal |= VMX_PROC_CTLS2_INVPCID;
-
-    /* Enable VPID. */
-    if (pVM->hmr0.s.vmx.fVpid)
-        fVal |= VMX_PROC_CTLS2_VPID;
-
-    /* Enable unrestricted guest execution. */
-    if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
-        fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
-
-#if 0
-    if (pVM->hm.s.fVirtApicRegs)
-    {
-        /* Enable APIC-register virtualization. */
-        Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
-        fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
-
-        /* Enable virtual-interrupt delivery. */
-        Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
-        fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
-    }
-#endif
-
-    /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is
-       where the TPR shadow resides. */
-    /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
-     *        done dynamically. */
-    if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
-    {
-        fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
-        vmxHCSetupVmcsApicAccessAddr(pVCpu);
-    }
-
-    /* Enable the RDTSCP instruction if we expose it to the guest and it is supported
-       by the hardware. Without this, a guest executing RDTSCP would cause a #UD. */
-    if (   pVM->cpum.ro.GuestFeatures.fRdTscP
-        && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP))
-        fVal |= VMX_PROC_CTLS2_RDTSCP;
-
-    /* Enable Pause-Loop exiting. */
-    if (   (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
-        && pVM->hm.s.vmx.cPleGapTicks
-        && pVM->hm.s.vmx.cPleWindowTicks)
-    {
-        fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
-
-        int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);          AssertRC(rc);
-        rc     = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);    AssertRC(rc);
-    }
-
-    if ((fVal & fZap) != fVal)
-    {
-        LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
-                    g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap));
-        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-
-    /* Commit it to the VMCS and update our cache. */
-    int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
-    AssertRC(rc);
-    pVmcsInfo->u32ProcCtls2 = fVal;
-
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Sets up processor-based VM-execution controls in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pVmcsInfo   The VMCS info. object.
- */
-static int vmxHCSetupVmcsProcCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
-{
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    uint32_t       fVal = g_HmMsrs.u.vmx.ProcCtls.n.allowed0;     /* Bits set here must be set in the VMCS. */
-    uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
-
-    fVal |= VMX_PROC_CTLS_HLT_EXIT                                    /* HLT causes a VM-exit. */
-         |  VMX_PROC_CTLS_USE_TSC_OFFSETTING                          /* Use TSC-offsetting. */
-         |  VMX_PROC_CTLS_MOV_DR_EXIT                                 /* MOV DRx causes a VM-exit. */
-         |  VMX_PROC_CTLS_UNCOND_IO_EXIT                              /* All IO instructions cause a VM-exit. */
-         |  VMX_PROC_CTLS_RDPMC_EXIT                                  /* RDPMC causes a VM-exit. */
-         |  VMX_PROC_CTLS_MONITOR_EXIT                                /* MONITOR causes a VM-exit. */
-         |  VMX_PROC_CTLS_MWAIT_EXIT;                                 /* MWAIT causes a VM-exit. */
-
-    /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later; check that it is not -always- required to be set or cleared. */
-    if (   !(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
-        ||  (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
-    {
-        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-
-    /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
-    if (!VM_IS_VMX_NESTED_PAGING(pVM))
-    {
-        Assert(!VM_IS_VMX_UNRESTRICTED_GUEST(pVM));
-        fVal |= VMX_PROC_CTLS_INVLPG_EXIT
-             |  VMX_PROC_CTLS_CR3_LOAD_EXIT
-             |  VMX_PROC_CTLS_CR3_STORE_EXIT;
-    }
-
-#ifdef IN_RING0
-    /* Use TPR shadowing if supported by the CPU. */
-    if (   PDMHasApic(pVM)
-        && (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
-    {
-        fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW;                /* CR8 reads from the Virtual-APIC page. */
-                                                             /* CR8 writes cause a VM-exit based on TPR threshold. */
-        Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
-        Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
-        vmxHCSetupVmcsVirtApicAddr(pVmcsInfo);
-    }
-    else
-    {
-        /* Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is
-           invalid on 32-bit Intel CPUs. Set this control only for 64-bit guests. */
-        if (pVM->hmr0.s.fAllow64BitGuests)
-            fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT             /* CR8 reads cause a VM-exit. */
-                 |  VMX_PROC_CTLS_CR8_LOAD_EXIT;             /* CR8 writes cause a VM-exit. */
-    }
-
-    /* Use MSR-bitmaps if supported by the CPU. */
-    if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
-    {
-        fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;
-        vmxHCSetupVmcsMsrBitmapAddr(pVmcsInfo);
-    }
-#endif
-
-    /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
-    if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
-        fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
-
-    if ((fVal & fZap) != fVal)
-    {
-        LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
-                    g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap));
-        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-
-    /* Commit it to the VMCS and update our cache. */
-    int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
-    AssertRC(rc);
-    pVmcsInfo->u32ProcCtls = fVal;
-
-    /* Set up MSR permissions that don't change through the lifetime of the VM. */
-    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
-        vmxHCSetupVmcsMsrPermissions(pVCpu, pVmcsInfo);
-
-    /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
-    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
-        return vmxHCSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
-
-    /* Sanity check, should not really happen. */
-    if (RT_LIKELY(!VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
-    { /* likely */ }
-    else
-    {
-        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INVALID_UX_COMBO;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-
-    /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
-    return VINF_SUCCESS;
-}
    2559 
    2560 
    2561 /**
    2562  * Sets up miscellaneous (everything other than Pin, Processor and secondary
    2563  * Processor-based VM-execution) control fields in the VMCS.
    2564  *
    2565  * @returns VBox status code.
    2566  * @param   pVCpu       The cross context virtual CPU structure.
    2567  * @param   pVmcsInfo   The VMCS info. object.
    2568  */
    2569 static int vmxHCSetupVmcsMiscCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2570 {
    2571 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2572     if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUseVmcsShadowing)
    2573     {
    2574         vmxHCSetupVmcsVmreadBitmapAddr(pVCpu);
    2575         vmxHCSetupVmcsVmwriteBitmapAddr(pVCpu);
    2576     }
    2577 #endif
    2578 
    2579     Assert(pVmcsInfo->u64VmcsLinkPtr == NIL_RTHCPHYS);
    2580     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
    2581     AssertRC(rc);
    2582 
    2583     rc = vmxHCSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
    2584     if (RT_SUCCESS(rc))
    2585     {
    2586         uint64_t const u64Cr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
    2587         uint64_t const u64Cr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
    2588 
    2589         rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);    AssertRC(rc);
    2590         rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);    AssertRC(rc);
    2591 
    2592         pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
    2593         pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
    2594 
    2595         if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fLbr)
    2596         {
    2597             rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
    2598             AssertRC(rc);
    2599         }
    2600         return VINF_SUCCESS;
    2601     }
    2602     else
    2603         LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
    2604     return rc;
    2605 }
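
To make the CR0/CR4 guest/host mask writes above concrete: for every mask bit that is 1 the guest observes the read-shadow value and its writes cause VM-exits, while 0 bits pass straight through to the real control register. A small standalone illustration of the read side (the helper is hypothetical, not part of the source):

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative only: composes the CR0 value a guest read observes, given
       the guest/host mask and the read shadow written to the VMCS above. */
    static uint64_t guestVisibleCr0(uint64_t uRealCr0, uint64_t uMask, uint64_t uShadow)
    {
        return (uRealCr0 & ~uMask)   /* unmasked bits: the real register shows through */
             | (uShadow  &  uMask);  /* masked bits: the guest sees the read shadow    */
    }

    int main(void)
    {
        uint64_t const uMask   = UINT64_C(0x80000000);   /* host owns CR0.PG        */
        uint64_t const uReal   = UINT64_C(0x80000033);   /* PG is really set        */
        uint64_t const uShadow = UINT64_C(0x00000000);   /* the guest believes PG=0 */
        assert(guestVisibleCr0(uReal, uMask, uShadow) == UINT64_C(0x33));
        return 0;
    }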
    2606 
    2607 
    2608 /**
    2609  * Sets up the initial exception bitmap in the VMCS based on static conditions.
    2610  *
     2611  * We shall set up those exception intercepts that don't change during the
    2612  * lifetime of the VM here. The rest are done dynamically while loading the
    2613  * guest state.
    2614  *
    2615  * @param   pVCpu       The cross context virtual CPU structure.
    2616  * @param   pVmcsInfo   The VMCS info. object.
    2617  */
    2618 static void vmxHCSetupVmcsXcptBitmap(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2619 {
    2620     /*
    2621      * The following exceptions are always intercepted:
    2622      *
    2623      * #AC - To prevent the guest from hanging the CPU and for dealing with
    2624      *       split-lock detecting host configs.
     2625      * #DB - To maintain the DR6 state even when intercepting DRx reads/writes, and
     2626      *       because recursive #DBs can cause a CPU hang.
    2627      * #PF - To sync our shadow page tables when nested-paging is not used.
    2628      */
    2629     bool const fNestedPaging = pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging;
    2630     uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
    2631                                | RT_BIT(X86_XCPT_DB)
    2632                                | (fNestedPaging ? 0 : RT_BIT(X86_XCPT_PF));
    2633 
    2634     /* Commit it to the VMCS. */
    2635     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    2636     AssertRC(rc);
    2637 
    2638     /* Update our cache of the exception bitmap. */
    2639     pVmcsInfo->u32XcptBitmap = uXcptBitmap;
    2640 }
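
Since bit N of the exception bitmap corresponds to vector N, querying the cached bitmap is a one-liner. A standalone sketch (the helper name is hypothetical, not part of the source):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: true when the given vector (1 = #DB, 14 = #PF,
       17 = #AC, ...) causes VM-exits according to the cached bitmap. */
    static bool isXcptIntercepted(uint32_t uXcptBitmap, uint8_t uVector)
    {
        return (uXcptBitmap & (UINT32_C(1) << uVector)) != 0;
    }

    int main(void)
    {
        uint32_t const uBitmap = (UINT32_C(1) << 17) | (UINT32_C(1) << 1) | (UINT32_C(1) << 14);
        assert( isXcptIntercepted(uBitmap, 14));   /* #PF intercepted         */
        assert(!isXcptIntercepted(uBitmap, 3));    /* #BP passes to the guest */
        return 0;
    }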
    2641 
    2642 
    2643 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2644 /**
    2645  * Sets up the VMCS for executing a nested-guest using hardware-assisted VMX.
    2646  *
    2647  * @returns VBox status code.
    2648  * @param   pVmcsInfo   The VMCS info. object.
    2649  */
     2650 static int vmxHCSetupVmcsCtlsNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2651 {
    2652     Assert(pVmcsInfo->u64VmcsLinkPtr == NIL_RTHCPHYS);
    2653     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
    2654     AssertRC(rc);
    2655 
    2656     rc = vmxHCSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
    2657     if (RT_SUCCESS(rc))
    2658     {
    2659         if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    2660             vmxHCSetupVmcsMsrBitmapAddr(pVmcsInfo);
    2661 
     2662         /* Paranoia - We've not yet initialized these; they will be initialized while merging the VMCS. */
    2663         Assert(!pVmcsInfo->u64Cr0Mask);
    2664         Assert(!pVmcsInfo->u64Cr4Mask);
    2665         return VINF_SUCCESS;
    2666     }
     2667     LogRelFunc(("Failed to set up the auto-load/store MSR addresses in the nested-guest VMCS. rc=%Rrc\n", rc));
    2668     return rc;
    2669 }
    2670 #endif
    2671 #endif /* !IN_RING0 */
    26721451
    26731452
     
    27341513             */
    27351514            if (   g_fHmVmxSupportsVmcsEfer
    2736                 && vmxHCShouldSwapEferMsr(pVCpu, pVmxTransient))
     1515#ifndef IN_NEM_DARWIN
     1516                && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
     1517#endif
     1518                )
    27371519                fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
    27381520            else
     
    27941576            fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
    27951577
    2796 #ifdef IN_RING0
     1578#ifndef IN_NEM_DARWIN
    27971579            /*
    27981580             * If the VMCS EFER MSR fields are supported by the hardware, we use it.
     
    28021584             */
    28031585            if (   g_fHmVmxSupportsVmcsEfer
    2804                 && vmxHCShouldSwapEferMsr(pVCpu, pVmxTransient))
     1586                && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
    28051587            {
    28061588                fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
     
    30391821
    30401822
    3041 #ifdef IN_RING0
    3042 /**
    3043  * Exports the guest's RSP into the guest-state area in the VMCS.
    3044  *
    3045  * @param   pVCpu   The cross context virtual CPU structure.
    3046  *
    3047  * @remarks No-long-jump zone!!!
    3048  */
    3049 static void vmxHCExportGuestRsp(PVMCPUCC pVCpu)
    3050 {
    3051     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RSP)
    3052     {
    3053         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
    3054 
    3055         int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RSP, pVCpu->cpum.GstCtx.rsp);
    3056         AssertRC(rc);
    3057 
    3058         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RSP);
    3059         Log4Func(("rsp=%#RX64\n", pVCpu->cpum.GstCtx.rsp));
    3060     }
    3061 }
    3062 #endif
    3063 
    3064 
    30651823/**
    30661824 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
     
    30841842        Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
    30851843
    3086 #ifdef IN_RING0
     1844#ifndef IN_NEM_DARWIN
    30871845        /*
    30881846         * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
     
    31751933
    31761934        rc  = vmxHCClearShadowVmcs(pVmcsInfo);
    3177         rc |= vmxHCLoadVmcs(pVmcsInfo);
     1935        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
    31781936    }
    31791937
     
    32191977
    32201978        rc  = vmxHCClearShadowVmcs(pVmcsInfo);
    3221         rc |= vmxHCLoadVmcs(pVmcsInfo);
     1979        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
    32221980    }
    32231981    return rc;
     
    32762034        Log4Func(("Disabled\n"));
    32772035    }
    3278 }
    3279 #endif
    3280 
    3281 
    3282 #ifdef IN_RING0
    3283 /**
    3284  * Exports the guest hardware-virtualization state.
    3285  *
    3286  * @returns VBox status code.
    3287  * @param   pVCpu           The cross context virtual CPU structure.
    3288  * @param   pVmxTransient   The VMX-transient structure.
    3289  *
    3290  * @remarks No-long-jump zone!!!
    3291  */
    3292 static int vmxHCExportGuestHwvirtState(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    3293 {
    3294     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_HWVIRT)
    3295     {
    3296 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3297         /*
    3298          * Check if the VMX feature is exposed to the guest and if the host CPU supports
    3299          * VMCS shadowing.
    3300          */
    3301         if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUseVmcsShadowing)
    3302         {
    3303             /*
    3304              * If the nested hypervisor has loaded a current VMCS and is in VMX root mode,
    3305              * copy the nested hypervisor's current VMCS into the shadow VMCS and enable
    3306              * VMCS shadowing to skip intercepting some or all VMREAD/VMWRITE VM-exits.
    3307              *
    3308              * We check for VMX root mode here in case the guest executes VMXOFF without
    3309              * clearing the current VMCS pointer and our VMXOFF instruction emulation does
    3310              * not clear the current VMCS pointer.
    3311              */
    3312             PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    3313             if (   CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx)
    3314                 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
    3315                 && CPUMIsGuestVmxCurrentVmcsValid(&pVCpu->cpum.GstCtx))
    3316             {
    3317                 /* Paranoia. */
    3318                 Assert(!pVmxTransient->fIsNestedGuest);
    3319 
    3320                 /*
    3321                  * For performance reasons, also check if the nested hypervisor's current VMCS
    3322                  * was newly loaded or modified before copying it to the shadow VMCS.
    3323                  */
    3324                 if (!VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs)
    3325                 {
    3326                     int rc = vmxHCCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
    3327                     AssertRCReturn(rc, rc);
    3328                     VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs = true;
    3329                 }
    3330                 vmxHCEnableVmcsShadowing(pVmcsInfo);
    3331             }
    3332             else
    3333                 vmxHCDisableVmcsShadowing(pVmcsInfo);
    3334         }
    3335 #else
    3336         NOREF(pVmxTransient);
    3337 #endif
    3338         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT);
    3339     }
    3340     return VINF_SUCCESS;
    33412036}
    33422037#endif
     
    34232118             */
    34242119            uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
    3425 #ifdef IN_RING0
     2120#ifndef IN_NEM_DARWIN
    34262121            if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    34272122            {
     
    35522247        if (VM_IS_VMX_NESTED_PAGING(pVM))
    35532248        {
    3554 #ifdef IN_RING0
     2249#ifndef IN_NEM_DARWIN
    35552250            PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    35562251            pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
     
    35972292                 */
    35982293            }
    3599 #ifdef IN_RING0
     2294#ifndef IN_NEM_DARWIN
    36002295            else
    36012296            {
     
    36692364        Assert(!RT_HI_U32(u64GuestCr4));
    36702365
    3671 #ifdef IN_RING0
     2366#ifndef IN_NEM_DARWIN
    36722367        /*
    36732368         * Setup VT-x's view of the guest CR4.
     
    37472442        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4);   AssertRC(rc);
    37482443
    3749 #ifdef IN_RING0
     2444#ifndef IN_NEM_DARWIN
    37502445        /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
    37512446        bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     
    37532448        {
    37542449            pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    3755             vmxHCUpdateStartVmFunction(pVCpu);
     2450            hmR0VmxUpdateStartVmFunction(pVCpu);
    37562451        }
    37572452#endif
     
    37632458    return rc;
    37642459}
    3765 
    3766 
    3767 #ifdef IN_RING0
    3768 /**
    3769  * Exports the guest debug registers into the guest-state area in the VMCS.
    3770  * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
    3771  *
    3772  * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
    3773  *
    3774  * @returns VBox status code.
    3775  * @param   pVCpu           The cross context virtual CPU structure.
    3776  * @param   pVmxTransient   The VMX-transient structure.
    3777  *
    3778  * @remarks No-long-jump zone!!!
    3779  */
    3780 static int vmxHCExportSharedDebugState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    3781 {
    3782     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    3783 
    3784     /** @todo NSTVMX: Figure out what we want to do with nested-guest instruction
    3785      *        stepping. */
    3786     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    3787 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3788     if (pVmxTransient->fIsNestedGuest)
    3789     {
    3790         int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, CPUMGetGuestDR7(pVCpu));
    3791         AssertRC(rc);
    3792 
    3793         /*
    3794          * We don't want to always intercept MOV DRx for nested-guests as it causes
    3795          * problems when the nested hypervisor isn't intercepting them, see @bugref{10080}.
    3796          * Instead, they are strictly only requested when the nested hypervisor intercepts
    3797          * them -- handled while merging VMCS controls.
    3798          *
    3799          * If neither the outer nor the nested-hypervisor is intercepting MOV DRx,
    3800          * then the nested-guest debug state should be actively loaded on the host so that
     3801          * the nested-guest can read its own debug registers without causing VM-exits.
    3802          */
    3803         if (   !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
    3804             && !CPUMIsGuestDebugStateActive(pVCpu))
    3805             CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
    3806         return VINF_SUCCESS;
    3807     }
    3808 #endif
    3809 
    3810 #ifdef VBOX_STRICT
    3811     /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
    3812     if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
    3813     {
    3814         /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
    3815         Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
    3816         Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
    3817     }
    3818 #endif
    3819 
    3820     bool     fSteppingDB      = false;
    3821     bool     fInterceptMovDRx = false;
    3822     uint32_t uProcCtls        = pVmcsInfo->u32ProcCtls;
    3823     if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    3824     {
    3825         /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
    3826         if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
    3827         {
    3828             uProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
    3829             Assert(fSteppingDB == false);
    3830         }
    3831         else
    3832         {
    3833             pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
    3834             VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
    3835             pVCpu->hmr0.s.fClearTrapFlag = true;
    3836             fSteppingDB = true;
    3837         }
    3838     }
    3839 
    3840     uint64_t u64GuestDr7;
    3841     if (   fSteppingDB
    3842         || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
    3843     {
    3844         /*
    3845          * Use the combined guest and host DRx values found in the hypervisor register set
    3846          * because the hypervisor debugger has breakpoints active or someone is single stepping
    3847          * on the host side without a monitor trap flag.
    3848          *
    3849          * Note! DBGF expects a clean DR6 state before executing guest code.
    3850          */
    3851         if (!CPUMIsHyperDebugStateActive(pVCpu))
    3852         {
    3853             CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
    3854             Assert(CPUMIsHyperDebugStateActive(pVCpu));
    3855             Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    3856         }
    3857 
    3858         /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
    3859         u64GuestDr7 = CPUMGetHyperDR7(pVCpu);
    3860         pVCpu->hmr0.s.fUsingHyperDR7 = true;
    3861         fInterceptMovDRx = true;
    3862     }
    3863     else
    3864     {
    3865         /*
    3866          * If the guest has enabled debug registers, we need to load them prior to
    3867          * executing guest code so they'll trigger at the right time.
    3868          */
    3869         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
    3870         if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
    3871         {
    3872             if (!CPUMIsGuestDebugStateActive(pVCpu))
    3873             {
    3874                 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
    3875                 Assert(CPUMIsGuestDebugStateActive(pVCpu));
    3876                 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    3877                 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxArmed);
    3878             }
    3879             Assert(!fInterceptMovDRx);
    3880         }
    3881         else if (!CPUMIsGuestDebugStateActive(pVCpu))
    3882         {
    3883             /*
     3884              * If no debugging is enabled, we'll lazily load DR0-3.  Unlike on AMD-V, we
    3885              * must intercept #DB in order to maintain a correct DR6 guest value, and
    3886              * because we need to intercept it to prevent nested #DBs from hanging the
    3887              * CPU, we end up always having to intercept it. See vmxHCSetupVmcsXcptBitmap().
    3888              */
    3889             fInterceptMovDRx = true;
    3890         }
    3891 
    3892         /* Update DR7 with the actual guest value. */
    3893         u64GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
    3894         pVCpu->hmr0.s.fUsingHyperDR7 = false;
    3895     }
    3896 
    3897     if (fInterceptMovDRx)
    3898         uProcCtls |= VMX_PROC_CTLS_MOV_DR_EXIT;
    3899     else
    3900         uProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
    3901 
    3902     /*
    3903      * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
    3904      * monitor-trap flag and update our cache.
    3905      */
    3906     if (uProcCtls != pVmcsInfo->u32ProcCtls)
    3907     {
    3908         int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
    3909         AssertRC(rc);
    3910         pVmcsInfo->u32ProcCtls = uProcCtls;
    3911     }
    3912 
    3913     /*
    3914      * Update guest DR7.
    3915      */
    3916     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, u64GuestDr7);
    3917     AssertRC(rc);
    3918 
    3919     /*
    3920      * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
     3921      * we need to clear any interrupt inhibition, as otherwise it causes a VM-entry failure.
    3922      *
    3923      * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
    3924      */
    3925     if (fSteppingDB)
    3926     {
    3927         Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
    3928         Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
    3929 
    3930         uint32_t fIntrState = 0;
    3931         rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
    3932         AssertRC(rc);
    3933 
    3934         if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
    3935         {
    3936             fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
    3937             rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
    3938             AssertRC(rc);
    3939         }
    3940     }
    3941 
    3942     return VINF_SUCCESS;
    3943 }
    3944 #endif /* !IN_RING0 */
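
For reference, the X86_DR7_ENABLED_MASK test above boils down to the L0-L3/G0-G3 enable bits in DR7's low byte. A standalone check of that reading of the architectural layout (illustrative, not taken from the source):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative: DR7 bits 0-7 are the per-breakpoint local/global enable
       bits, so any set bit there means a hardware breakpoint is armed. */
    static bool anyBreakpointArmed(uint64_t uDr7)
    {
        return (uDr7 & UINT64_C(0xFF)) != 0;
    }

    int main(void)
    {
        assert( anyBreakpointArmed(UINT64_C(0x401)));  /* L0 set (bit 10 is RA1)  */
        assert(!anyBreakpointArmed(UINT64_C(0x400)));  /* only the reserved-1 bit */
        return 0;
    }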
    39452460
    39462461
     
     40782593        /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
    40792594        uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
    4080 #ifdef IN_RING0
     2595#ifndef IN_NEM_DARWIN
    40812596        if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    40822597        {
     
    41412656
    41422657    uint32_t u32Access = pSelReg->Attr.u;
    4143 #ifdef IN_RING0
     2658#ifndef IN_NEM_DARWIN
    41442659    if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    41452660#endif
     
    41572672            u32Access = X86DESCATTR_UNUSABLE;
    41582673    }
    4159 #ifdef IN_RING0
     2674#ifndef IN_NEM_DARWIN
    41602675    else
    41612676    {
     
    42042719{
    42052720    int                 rc              = VERR_INTERNAL_ERROR_5;
    4206 #ifdef IN_RING0
     2721#ifndef IN_NEM_DARWIN
    42072722    PVMCC               pVM             = pVCpu->CTX_SUFF(pVM);
    42082723#endif
    42092724    PCCPUMCTX           pCtx            = &pVCpu->cpum.GstCtx;
    42102725    PVMXVMCSINFO        pVmcsInfo       = pVmxTransient->pVmcsInfo;
    4211 #ifdef IN_RING0
     2726#ifndef IN_NEM_DARWIN
    42122727    PVMXVMCSINFOSHARED  pVmcsInfoShared = pVmcsInfo->pShared;
    42132728#endif
     
    42212736        {
    42222737            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
    4223 #ifdef IN_RING0
     2738#ifndef IN_NEM_DARWIN
    42242739            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42252740                pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
     
    42332748        {
    42342749            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
    4235 #ifdef IN_RING0
     2750#ifndef IN_NEM_DARWIN
    42362751            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42372752                pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
     
    42452760        {
    42462761            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
    4247 #ifdef IN_RING0
     2762#ifndef IN_NEM_DARWIN
    42482763            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42492764                pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
     
    42572772        {
    42582773            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
    4259 #ifdef IN_RING0
     2774#ifndef IN_NEM_DARWIN
    42602775            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42612776                pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
     
    42692784        {
    42702785            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
    4271 #ifdef IN_RING0
     2786#ifndef IN_NEM_DARWIN
    42722787            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42732788                pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
     
    42812796        {
    42822797            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
    4283 #ifdef IN_RING0
     2798#ifndef IN_NEM_DARWIN
    42842799            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42852800                pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
     
    43132828        uint64_t u64Base;
    43142829        uint32_t u32AccessRights;
    4315 #ifdef IN_RING0
     2830#ifndef IN_NEM_DARWIN
    43162831        if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
    43172832#endif
     
    43222837            u32AccessRights = pCtx->tr.Attr.u;
    43232838        }
    4324 #ifdef IN_RING0
     2839#ifndef IN_NEM_DARWIN
    43252840        else
    43262841        {
     
    44442959    return VINF_SUCCESS;
    44452960}
    4446 
    4447 
    4448 #ifdef IN_RING0
    4449 /**
    4450  * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
    4451  * areas.
    4452  *
    4453  * These MSRs will automatically be loaded to the host CPU on every successful
    4454  * VM-entry and stored from the host CPU on every successful VM-exit.
    4455  *
     4456  * We create/update MSR slots for the host MSRs in the VM-exit MSR-load area. The
     4457  * actual host MSR values are not updated here for performance reasons. See
    4458  * vmxHCExportHostMsrs().
    4459  *
     4460  * We also export the guest sysenter MSRs into the guest-state area in the VMCS.
    4461  *
    4462  * @returns VBox status code.
    4463  * @param   pVCpu           The cross context virtual CPU structure.
    4464  * @param   pVmxTransient   The VMX-transient structure.
    4465  *
    4466  * @remarks No-long-jump zone!!!
    4467  */
    4468 static int vmxHCExportGuestMsrs(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    4469 {
    4470     AssertPtr(pVCpu);
    4471     AssertPtr(pVmxTransient);
    4472 
    4473     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    4474     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    4475 
    4476     /*
     4477      * MSRs for which we use the auto-load/store MSR area in the VMCS.
    4478      * For 64-bit hosts, we load/restore them lazily, see vmxHCLazyLoadGuestMsrs(),
    4479      * nothing to do here. The host MSR values are updated when it's safe in
    4480      * vmxHCLazySaveHostMsrs().
    4481      *
     4482      * For nested-guests, the guest's MSRs from the VM-entry MSR-load area are already
    4483      * loaded (into the guest-CPU context) by the VMLAUNCH/VMRESUME instruction
    4484      * emulation. The merged MSR permission bitmap will ensure that we get VM-exits
     4485      * for any MSRs that are not part of the lazy MSRs, so we do not need to place
    4486      * those MSRs into the auto-load/store MSR area. Nothing to do here.
    4487      */
    4488     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
    4489     {
    4490         /* No auto-load/store MSRs currently. */
    4491         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    4492     }
    4493 
    4494     /*
    4495      * Guest Sysenter MSRs.
    4496      */
    4497     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
    4498     {
    4499         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
    4500 
    4501         if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
    4502         {
    4503             int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
    4504             AssertRC(rc);
    4505             ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
    4506         }
    4507 
    4508         if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
    4509         {
    4510             int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
    4511             AssertRC(rc);
    4512             ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
    4513         }
    4514 
    4515         if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
    4516         {
    4517             int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
    4518             AssertRC(rc);
    4519             ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
    4520         }
    4521     }
    4522 
    4523     /*
    4524      * Guest/host EFER MSR.
    4525      */
    4526     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
    4527     {
    4528         /* Whether we are using the VMCS to swap the EFER MSR must have been
    4529            determined earlier while exporting VM-entry/VM-exit controls. */
    4530         Assert(!(ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));
    4531         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
    4532 
    4533         if (vmxHCShouldSwapEferMsr(pVCpu, pVmxTransient))
    4534         {
    4535             /*
    4536              * EFER.LME is written by software, while EFER.LMA is set by the CPU to (CR0.PG & EFER.LME).
    4537              * This means a guest can set EFER.LME=1 while CR0.PG=0 and EFER.LMA can remain 0.
    4538              * VT-x requires that "IA-32e mode guest" VM-entry control must be identical to EFER.LMA
    4539              * and to CR0.PG. Without unrestricted execution, CR0.PG (used for VT-x, not the shadow)
    4540              * must always be 1. This forces us to effectively clear both EFER.LMA and EFER.LME until
    4541              * the guest has also set CR0.PG=1. Otherwise, we would run into an invalid-guest state
    4542              * during VM-entry.
    4543              */
    4544             uint64_t uGuestEferMsr = pCtx->msrEFER;
    4545             if (!VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
    4546             {
    4547                 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
    4548                     uGuestEferMsr &= ~MSR_K6_EFER_LME;
    4549                 else
    4550                     Assert((pCtx->msrEFER & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
    4551             }
    4552 
    4553             /*
    4554              * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
    4555              * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
    4556              */
    4557             if (g_fHmVmxSupportsVmcsEfer)
    4558             {
    4559                 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, uGuestEferMsr);
    4560                 AssertRC(rc);
    4561             }
    4562             else
    4563             {
    4564                 /*
    4565                  * We shall use the auto-load/store MSR area only for loading the EFER MSR but we must
    4566                  * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}.
    4567                  */
    4568                 int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, uGuestEferMsr,
    4569                                                     false /* fSetReadWrite */, false /* fUpdateHostMsr */);
    4570                 AssertRCReturn(rc, rc);
    4571             }
    4572 
    4573             Log4Func(("efer=%#RX64 shadow=%#RX64\n", uGuestEferMsr, pCtx->msrEFER));
    4574         }
    4575         else if (!g_fHmVmxSupportsVmcsEfer)
    4576             vmxHCRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER);
    4577 
    4578         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
    4579     }
    4580 
    4581     /*
    4582      * Other MSRs.
    4583      */
    4584     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)
    4585     {
    4586         /* Speculation Control (R/W). */
    4587         HMVMX_CPUMCTX_ASSERT(pVCpu, HM_CHANGED_GUEST_OTHER_MSRS);
    4588         if (pVM->cpum.ro.GuestFeatures.fIbrs)
    4589         {
    4590             int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu),
    4591                                                 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
    4592             AssertRCReturn(rc, rc);
    4593         }
    4594 
    4595         /* Last Branch Record. */
    4596         if (VM_IS_VMX_LBR(pVM))
    4597         {
    4598             PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmxTransient->pVmcsInfo->pShared;
    4599             uint32_t const idFromIpMsrStart = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst;
    4600             uint32_t const idToIpMsrStart   = pVM->hmr0.s.vmx.idLbrToIpMsrFirst;
    4601             uint32_t const cLbrStack        = pVM->hmr0.s.vmx.idLbrFromIpMsrLast - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst + 1;
    4602             Assert(cLbrStack <= 32);
    4603             for (uint32_t i = 0; i < cLbrStack; i++)
    4604             {
    4605                 int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, idFromIpMsrStart + i,
    4606                                                     pVmcsInfoShared->au64LbrFromIpMsr[i],
    4607                                                     false /* fSetReadWrite */, false /* fUpdateHostMsr */);
    4608                 AssertRCReturn(rc, rc);
    4609 
    4610                 /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
    4611                 if (idToIpMsrStart != 0)
    4612                 {
    4613                     rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, idToIpMsrStart + i,
    4614                                                     pVmcsInfoShared->au64LbrToIpMsr[i],
    4615                                                     false /* fSetReadWrite */, false /* fUpdateHostMsr */);
    4616                     AssertRCReturn(rc, rc);
    4617                 }
    4618             }
    4619 
    4620             /* Add LBR top-of-stack MSR (which contains the index to the most recent record). */
    4621             int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, pVM->hmr0.s.vmx.idLbrTosMsr,
    4622                                                 pVmcsInfoShared->u64LbrTosMsr, false /* fSetReadWrite */,
    4623                                                 false /* fUpdateHostMsr */);
    4624             AssertRCReturn(rc, rc);
    4625         }
    4626 
    4627         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
    4628     }
    4629 
    4630     return VINF_SUCCESS;
    4631 }
    4632 
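The EFER.LMA/LME rule described in the comment above is easy to get backwards, so here is a tiny self-contained check of the masking behaviour (bit positions per the architectural EFER layout; the function name is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    #define EFER_LME  UINT64_C(0x100)  /* bit 8: long mode enable  */
    #define EFER_LMA  UINT64_C(0x400)  /* bit 10: long mode active */

    /* Illustrative: without unrestricted guest execution, LME is hidden from
       the VMCS until the guest turns on paging and the CPU sets LMA. */
    static uint64_t maskEferForVmcs(uint64_t uGuestEfer)
    {
        if (!(uGuestEfer & EFER_LMA))
            uGuestEfer &= ~EFER_LME;
        return uGuestEfer;
    }

    int main(void)
    {
        assert(maskEferForVmcs(EFER_LME) == 0);                  /* PG=0: LME hidden */
        assert(maskEferForVmcs(EFER_LME | EFER_LMA) == (EFER_LME | EFER_LMA));
        return 0;
    }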
    4633 
    4634 /**
    4635  * Sets up the usage of TSC-offsetting and updates the VMCS.
    4636  *
     4637  * If offsetting is not possible, causes VM-exits on RDTSC(P)s. Also sets up the
    4638  * VMX-preemption timer.
    4639  *
    4641  * @param   pVCpu           The cross context virtual CPU structure.
    4642  * @param   pVmxTransient   The VMX-transient structure.
    4643  * @param   idCurrentCpu    The current CPU number.
    4644  *
    4645  * @remarks No-long-jump zone!!!
    4646  */
    4647 static void vmxHCUpdateTscOffsettingAndPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, RTCPUID idCurrentCpu)
    4648 {
    4649     bool         fOffsettedTsc;
    4650     bool         fParavirtTsc;
    4651     uint64_t     uTscOffset;
    4652     PVMCC        pVM       = pVCpu->CTX_SUFF(pVM);
    4653     PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    4654 
    4655     if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
    4656     {
     4657         /* The TMCpuTickGetDeadlineAndTscOffset function is expensive (calling it on
     4658            every entry slowed down the bs2-test1 CPUID testcase by ~33% on a 10980xe). */
    4659         uint64_t cTicksToDeadline;
    4660         if (   idCurrentCpu == pVCpu->hmr0.s.idLastCpu
    4661             && TMVirtualSyncIsCurrentDeadlineVersion(pVM, pVCpu->hmr0.s.vmx.uTscDeadlineVersion))
    4662         {
    4663             STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionReusingDeadline);
    4664             fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
    4665             cTicksToDeadline = pVCpu->hmr0.s.vmx.uTscDeadline - SUPReadTsc();
    4666             if ((int64_t)cTicksToDeadline > 0)
    4667             { /* hopefully */ }
    4668             else
    4669             {
    4670                 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionReusingDeadlineExpired);
    4671                 cTicksToDeadline = 0;
    4672             }
    4673         }
    4674         else
    4675         {
    4676             STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionRecalcingDeadline);
    4677             cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc,
    4678                                                                 &pVCpu->hmr0.s.vmx.uTscDeadline,
    4679                                                                 &pVCpu->hmr0.s.vmx.uTscDeadlineVersion);
    4680             pVCpu->hmr0.s.vmx.uTscDeadline += cTicksToDeadline;
    4681             if (cTicksToDeadline >= 128)
    4682             { /* hopefully */ }
    4683             else
    4684                 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionRecalcingDeadlineExpired);
    4685         }
    4686 
    4687         /* Make sure the returned values have sane upper and lower boundaries. */
    4688         uint64_t const u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
    4689         cTicksToDeadline   = RT_MIN(cTicksToDeadline, u64CpuHz / 64);      /* 1/64th of a second,  15.625ms. */ /** @todo r=bird: Once real+virtual timers move to separate thread, we can raise the upper limit (16ms isn't much). ASSUMES working poke cpu function. */
     4690         cTicksToDeadline   = RT_MAX(cTicksToDeadline, u64CpuHz / 32768);   /* 1/32768th of a second,  ~30us. */
    4691         cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
    4692 
    4693         /** @todo r=ramshankar: We need to find a way to integrate nested-guest
    4694          *        preemption timers here. We probably need to clamp the preemption timer,
    4695          *        after converting the timer value to the host. */
    4696         uint32_t const cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
    4697         int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
    4698         AssertRC(rc);
    4699     }
    4700     else
    4701         fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
    4702 
    4703     if (fParavirtTsc)
    4704     {
     4705         /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
     4706            information before every VM-entry, hence this is disabled for performance reasons. */
    4707 #if 0
    4708         int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
    4709         AssertRC(rc);
    4710 #endif
    4711         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatTscParavirt);
    4712     }
    4713 
    4714     if (   fOffsettedTsc
    4715         && RT_LIKELY(!pVCpu->hmr0.s.fDebugWantRdTscExit))
    4716     {
    4717         if (pVmxTransient->fIsNestedGuest)
    4718             uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
    4719         vmxHCSetTscOffsetVmcs(pVCpu, pVmcsInfo, uTscOffset);
    4720         vmxHCRemoveProcCtlsVmcs(pVCpu, pVmxTransient, VMX_PROC_CTLS_RDTSC_EXIT);
    4721     }
    4722     else
    4723     {
    4724         /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
    4725         vmxHCSetProcCtlsVmcs(pVmxTransient, VMX_PROC_CTLS_RDTSC_EXIT);
    4726     }
    4727 }
    4728 #endif /* !IN_RING0 */
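
To see what the deadline clamping above yields in practice, the following standalone program reruns the arithmetic with an assumed 3.0 GHz host TSC and a preemption-timer shift of 5 (both figures are illustrative, not from the source):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t const uCpuHz = UINT64_C(3000000000);   /* assumed 3.0 GHz TSC    */
        unsigned const cShift = 5;                      /* assumed timer shift    */
        uint64_t cTicks       = UINT64_C(9000000000);   /* deadline 3 s away      */

        uint64_t const uHi = uCpuHz / 64;               /* 46 875 000 ~ 15.625 ms */
        uint64_t const uLo = uCpuHz / 32768;            /*     91 552 ~ 30.5 us   */
        if (cTicks > uHi) cTicks = uHi;
        if (cTicks < uLo) cTicks = uLo;

        /* Prints 1464843: the 15.625 ms cap in preemption-timer units. */
        printf("%llu\n", (unsigned long long)(cTicks >> cShift));
        return 0;
    }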
    47292961
    47302962
     
    49593191
    49603192#ifdef VBOX_STRICT
    4961 # ifdef IN_RING0
     3193# ifndef IN_NEM_DARWIN
    49623194    VMMRZCallRing3Disable(pVCpu);
    49633195# endif
     
    49683200               pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
    49693201# endif
    4970 # ifdef IN_RING0
     3202# ifndef IN_NEM_DARWIN
    49713203    VMMRZCallRing3Enable(pVCpu);
    49723204# endif
     
    51153347
    51163348        pCtx->rflags.u64 = u64Val;
    5117 #ifdef IN_RING0
     3349#ifndef IN_NEM_DARWIN
    51183350        PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
    51193351        if (pVmcsInfoShared->RealMode.fRealOnV86Active)
     
    52043436    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
    52053437
    5206 #ifdef IN_RING0
     3438#ifndef IN_NEM_DARWIN
    52073439    /*
    52083440     * We disable interrupts to make the updating of the state and in particular
     
    53033535                if (fWhat & CPUMCTX_EXTRN_TR)
    53043536                {
    5305 #ifdef IN_RING0
     3537#ifndef IN_NEM_DARWIN
    53063538                    /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
    53073539                       don't need to import that one. */
     
    53143546            if (fWhat & CPUMCTX_EXTRN_DR7)
    53153547            {
    5316 #ifdef IN_RING0
     3548#ifndef IN_NEM_DARWIN
    53173549                if (!pVCpu->hmr0.s.fUsingHyperDR7)
    53183550#endif
     
    53313563            }
    53323564
    5333 #ifdef IN_RING0
     3565#ifndef IN_NEM_DARWIN
    53343566            if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
    53353567            {
     
    53713603                            if (VM_IS_VMX_LBR(pVM))
    53723604                            {
    5373                                 if (vmxHCIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
     3605                                if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
    53743606                                {
    53753607                                    Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
     
    53773609                                    break;
    53783610                                }
    5379                                 if (vmxHCIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
     3611                                if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
    53803612                                {
     53813613                                    Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
     
    54323664                    }
    54333665#endif
    5434 #ifdef IN_RING0
     3666#ifndef IN_NEM_DARWIN
    54353667                    VMMRZCallRing3Disable(pVCpu);   /* May call into PGM which has Log statements. */
    54363668#endif
    54373669                    CPUMSetGuestCR0(pVCpu, u64Cr0);
    5438 #ifdef IN_RING0
     3670#ifndef IN_NEM_DARWIN
    54393671                    VMMRZCallRing3Enable(pVCpu);
    54403672#endif
     
    55443776        }
    55453777    }
    5546 #ifdef IN_RING0
     3778#ifndef IN_NEM_DARWIN
    55473779    else
    55483780        AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
     
    55783810     */
    55793811    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
    5580 #ifdef IN_RING0
     3812#ifndef IN_NEM_DARWIN
    55813813        && VMMRZCallRing3IsEnabled(pVCpu)
    55823814#endif
     
    56183850static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
    56193851{
    5620 #ifdef IN_RING0
     3852#ifndef IN_NEM_DARWIN
    56213853    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    56223854#endif
     
    58984130
    58994131
    5900 #ifdef IN_RING0
    5901 /**
    5902  * Does the necessary state syncing before returning to ring-3 for any reason
    5903  * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
    5904  *
    5905  * @returns VBox status code.
    5906  * @param   pVCpu           The cross context virtual CPU structure.
    5907  * @param   fImportState    Whether to import the guest state from the VMCS back
    5908  *                          to the guest-CPU context.
    5909  *
    5910  * @remarks No-long-jmp zone!!!
    5911  */
    5912 static int vmxHCLeave(PVMCPUCC pVCpu, bool fImportState)
    5913 {
    5914     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    5915     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    5916 
    5917     RTCPUID const idCpu = RTMpCpuId();
    5918     Log4Func(("HostCpuId=%u\n", idCpu));
    5919 
    5920     /*
    5921      * !!! IMPORTANT !!!
    5922      * If you modify code here, check whether VMXR0CallRing3Callback() needs to be updated too.
    5923      */
    5924 
    5925     /* Save the guest state if necessary. */
    5926     PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    5927     if (fImportState)
    5928     {
    5929         int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    5930         AssertRCReturn(rc, rc);
    5931     }
    5932 
    5933     /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
    5934     CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
    5935     Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    5936 
    5937     /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
    5938 #ifdef VBOX_STRICT
    5939     if (CPUMIsHyperDebugStateActive(pVCpu))
    5940         Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
    5941 #endif
    5942     CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
    5943     Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    5944     Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    5945 
    5946     /* Restore host-state bits that VT-x only restores partially. */
    5947     if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
    5948     {
    5949         Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hmr0.s.vmx.fRestoreHostFlags, idCpu));
    5950         VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
    5951     }
    5952     pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
    5953 
    5954     /* Restore the lazy host MSRs as we're leaving VT-x context. */
    5955     if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
    5956     {
    5957         /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
    5958         if (!fImportState)
    5959         {
    5960             int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
    5961             AssertRCReturn(rc, rc);
    5962         }
    5963         vmxHCLazyRestoreHostMsrs(pVCpu);
    5964         Assert(!pVCpu->hmr0.s.vmx.fLazyMsrs);
    5965     }
    5966     else
    5967         pVCpu->hmr0.s.vmx.fLazyMsrs = 0;
    5968 
    5969     /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
    5970     pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
    5971 
    5972     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatEntry);
    5973     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState);
    5974     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExportGuestState);
    5975     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatPreExit);
    5976     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling);
    5977     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitIO);
    5978     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx);
    5979     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi);
    5980     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry);
    5981     STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchLongJmpToR3);
    5982 
    5983     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    5984 
    5985     /** @todo This partially defeats the purpose of having preemption hooks.
    5986      *  The problem is, deregistering the hooks should be moved to a place that
     5987      *  lasts until the EMT is about to be destroyed, not every time we leave HM
    5988      *  context.
    5989      */
    5990     int rc = vmxHCClearVmcs(pVmcsInfo);
    5991     AssertRCReturn(rc, rc);
    5992 
    5993 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    5994     /*
    5995      * A valid shadow VMCS is made active as part of VM-entry. It is necessary to
    5996      * clear a shadow VMCS before allowing that VMCS to become active on another
    5997      * logical processor. We may or may not be importing guest state which clears
    5998      * it, so cover for it here.
    5999      *
    6000      * See Intel spec. 24.11.1 "Software Use of Virtual-Machine Control Structures".
    6001      */
    6002     if (   pVmcsInfo->pvShadowVmcs
    6003         && pVmcsInfo->fShadowVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    6004     {
    6005         rc = vmxHCClearShadowVmcs(pVmcsInfo);
    6006         AssertRCReturn(rc, rc);
    6007     }
    6008 
    6009     /*
    6010      * Flag that we need to re-export the host state if we switch to this VMCS before
    6011      * executing guest or nested-guest code.
    6012      */
    6013     pVmcsInfo->idHostCpuState = NIL_RTCPUID;
    6014 #endif
    6015 
    6016     Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
    6017     NOREF(idCpu);
    6018     return VINF_SUCCESS;
    6019 }
    6020 
    6021 
    6022 /**
    6023  * Leaves the VT-x session.
    6024  *
    6025  * @returns VBox status code.
    6026  * @param   pVCpu   The cross context virtual CPU structure.
    6027  *
    6028  * @remarks No-long-jmp zone!!!
    6029  */
    6030 static int vmxHCLeaveSession(PVMCPUCC pVCpu)
    6031 {
    6032     HM_DISABLE_PREEMPT(pVCpu);
    6033     HMVMX_ASSERT_CPU_SAFE(pVCpu);
    6034     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    6035     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    6036 
    6037     /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
    6038        and done this from the VMXR0ThreadCtxCallback(). */
    6039     if (!pVCpu->hmr0.s.fLeaveDone)
    6040     {
    6041         int rc2 = vmxHCLeave(pVCpu, true /* fImportState */);
    6042         AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
    6043         pVCpu->hmr0.s.fLeaveDone = true;
    6044     }
    6045     Assert(!pVCpu->cpum.GstCtx.fExtrn);
    6046 
    6047     /*
    6048      * !!! IMPORTANT !!!
    6049      * If you modify code here, make sure to check whether VMXR0CallRing3Callback() needs to be updated too.
    6050      */
    6051 
    6052     /* Deregister hook now that we've left HM context before re-enabling preemption. */
    6053     /** @todo Deregistering here means we need to VMCLEAR always
    6054      *        (longjmp/exit-to-r3) in VT-x which is not efficient, eliminate need
    6055      *        for calling VMMR0ThreadCtxHookDisable here! */
    6056     VMMR0ThreadCtxHookDisable(pVCpu);
    6057 
    6058     /* Leave HM context. This takes care of local init (term) and deregistering the longjmp-to-ring-3 callback. */
    6059     int rc = HMR0LeaveCpu(pVCpu);
    6060     HM_RESTORE_PREEMPT();
    6061     return rc;
    6062 }
    6063 
    6064 
    6065 /**
    6066  * Does the necessary state syncing before doing a longjmp to ring-3.
    6067  *
    6068  * @returns VBox status code.
    6069  * @param   pVCpu   The cross context virtual CPU structure.
    6070  *
    6071  * @remarks No-long-jmp zone!!!
    6072  */
    6073 DECLINLINE(int) vmxHCLongJmpToRing3(PVMCPUCC pVCpu)
    6074 {
    6075     return vmxHCLeaveSession(pVCpu);
    6076 }
    6077 
    6078 
    6079 /**
    6080  * Take necessary actions before going back to ring-3.
    6081  *
    6082  * An action requires us to go back to ring-3. This function does the necessary
    6083  * steps before we can safely return to ring-3. This is not the same as longjmps
     6084  * to ring-3; this is voluntary and prepares the guest so it may continue
    6085  * executing outside HM (recompiler/IEM).
    6086  *
    6087  * @returns VBox status code.
    6088  * @param   pVCpu   The cross context virtual CPU structure.
    6089  * @param   rcExit  The reason for exiting to ring-3. Can be
    6090  *                  VINF_VMM_UNKNOWN_RING3_CALL.
    6091  */
    6092 static int vmxHCExitToRing3(PVMCPUCC pVCpu, VBOXSTRICTRC rcExit)
    6093 {
    6094     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    6095 
    6096     PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    6097     if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
    6098     {
    6099         VMXGetCurrentVmcs(&VCPU_2_VMXSTATE(pVCpu).vmx.LastError.HCPhysCurrentVmcs);
    6100         VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32VmcsRev   = *(uint32_t *)pVmcsInfo->pvVmcs;
    6101         VCPU_2_VMXSTATE(pVCpu).vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
    6102         /* LastError.idCurrentCpu was updated in vmxHCPreRunGuestCommitted(). */
    6103     }
    6104 
     6105     /* Please, no longjumps here (a log flush could jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
    6106     VMMRZCallRing3Disable(pVCpu);
    6107     Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
    6108 
    6109     /*
    6110      * Convert any pending HM events back to TRPM due to premature exits to ring-3.
     6111      * We need to do this only on returns to ring-3 and not for longjmps to ring-3.
    6112      *
    6113      * This is because execution may continue from ring-3 and we would need to inject
    6114      * the event from there (hence place it back in TRPM).
    6115      */
    6116     if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
    6117     {
    6118         vmxHCPendingEventToTrpmTrap(pVCpu);
    6119         Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    6120 
    6121         /* Clear the events from the VMCS. */
    6122         int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);    AssertRC(rc);
    6123         rc     = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0);         AssertRC(rc);
    6124     }
    6125 #ifdef VBOX_STRICT
    6126     /*
    6127      * We check for rcExit here since for errors like VERR_VMX_UNABLE_TO_START_VM (which are
    6128      * fatal), we don't care about verifying duplicate injection of events. Errors like
     6129      * VERR_EM_INTERPRET are converted to their VINF_* counterparts -prior- to calling this
    6130      * function so those should and will be checked below.
    6131      */
    6132     else if (RT_SUCCESS(rcExit))
    6133     {
    6134         /*
    6135          * Ensure we don't accidentally clear a pending HM event without clearing the VMCS.
    6136          * This can be pretty hard to debug otherwise, interrupts might get injected twice
    6137          * occasionally, see @bugref{9180#c42}.
    6138          *
    6139          * However, if the VM-entry failed, any VM entry-interruption info. field would
     6140      * be left unmodified as the event would not have been injected into the guest. In
    6141          * such cases, don't assert, we're not going to continue guest execution anyway.
    6142          */
    6143         uint32_t uExitReason;
    6144         uint32_t uEntryIntInfo;
    6145         int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
    6146         rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &uEntryIntInfo);
    6147         AssertRC(rc);
    6148         AssertMsg(VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason) || !VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo),
    6149                   ("uExitReason=%#RX32 uEntryIntInfo=%#RX32 rcExit=%d\n", uExitReason, uEntryIntInfo, VBOXSTRICTRC_VAL(rcExit)));
    6150     }
    6151 #endif
    6152 
    6153     /*
    6154      * Clear the interrupt-window and NMI-window VMCS controls as we could have got
    6155      * a VM-exit with higher priority than interrupt-window or NMI-window VM-exits
    6156      * (e.g. TPR below threshold).
    6157      */
    6158     if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    6159     {
    6160         vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
    6161         vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
    6162     }
    6163 
    6164     /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
     6165        and if we're injecting an event, we should have a TRPM trap pending. */
    6166     AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
    6167 #ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
    6168     AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
    6169 #endif
    6170 
    6171     /* Save guest state and restore host state bits. */
    6172     int rc = vmxHCLeaveSession(pVCpu);
    6173     AssertRCReturn(rc, rc);
    6174     STAM_COUNTER_DEC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchLongJmpToR3);
    6175 
    6176     /* Thread-context hooks are unregistered at this point!!! */
    6177     /* Ring-3 callback notifications are unregistered at this point!!! */
    6178 
    6179     /* Sync recompiler state. */
    6180     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    6181     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
    6182                              | CPUM_CHANGED_LDTR
    6183                              | CPUM_CHANGED_GDTR
    6184                              | CPUM_CHANGED_IDTR
    6185                              | CPUM_CHANGED_TR
    6186                              | CPUM_CHANGED_HIDDEN_SEL_REGS);
    6187     if (   pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging
    6188         && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
    6189         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    6190 
    6191     Assert(!pVCpu->hmr0.s.fClearTrapFlag);
    6192 
    6193     /* Update the exit-to-ring 3 reason. */
    6194     VCPU_2_VMXSTATE(pVCpu).rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
    6195 
     6196     /* On our way back from ring-3, reload the guest state if there is a possibility of it having been changed. */
    6197     if (   rcExit != VINF_EM_RAW_INTERRUPT
    6198         || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    6199     {
    6200         Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL));
    6201         ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    6202     }
    6203 
    6204     STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchExitToR3);
    6205     VMMRZCallRing3Enable(pVCpu);
    6206     return rc;
    6207 }
    6208 
    6209 
    6210 /**
    6211  * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
    6212  * stack.
    6213  *
    6214  * @returns Strict VBox status code (i.e. informational status codes too).
    6215  * @retval  VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
    6216  * @param   pVCpu   The cross context virtual CPU structure.
    6217  * @param   uValue  The value to push to the guest stack.
    6218  */
    6219 static VBOXSTRICTRC vmxHCRealModeGuestStackPush(PVMCPUCC pVCpu, uint16_t uValue)
    6220 {
    6221     /*
    6222      * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
    6223      * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
    6224      * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
    6225      */
    6226     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    6227     if (pCtx->sp == 1)
    6228         return VINF_EM_RESET;
    6229     pCtx->sp -= sizeof(uint16_t);       /* May wrap around which is expected behaviour. */
    6230     int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
    6231     AssertRC(rc);
    6232     return rc;
    6233 }
     6234 #endif /* IN_RING0 */
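/*
 * Illustrative sketch, not part of the source: the 16-bit SP arithmetic above
 * relies on unsigned wraparound. In a hypothetical standalone harness:
 *
 *     uint16_t sp = 0x0000;
 *     sp -= sizeof(uint16_t);   // wraps to 0xfffe; the push lands at the top of
 *                               // the 64 KiB segment (see "Segment Wraparound").
 *
 * Only sp == 1 is rejected (VINF_EM_RESET) since the two-byte write starting at
 * ss.base + 0xffff would straddle the wraparound point.
 */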
    6235 
    62364132/**
    62374133 * Injects an event into the guest upon VM-entry by updating the relevant fields
     
    62594155    Assert(pfIntrState);
    62604156
    6261 #ifndef IN_RING0
     4157#ifdef IN_NEM_DARWIN
    62624158    RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
    62634159#endif
     
    63274223    if (CPUMIsGuestInRealModeEx(pCtx))     /* CR0.PE bit changes are always intercepted, so it's up to date. */
    63284224    {
    6329 #ifdef IN_RING0
     4225#ifndef IN_NEM_DARWIN
    63304226        if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
    63314227#endif
     
    63394235            u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
    63404236        }
    6341 #ifdef IN_RING0
     4237#ifndef IN_NEM_DARWIN
    63424238        else
    63434239        {
     
    64094305            /* Construct the stack frame for the interrupt/exception handler. */
    64104306            VBOXSTRICTRC rcStrict;
    6411             rcStrict = vmxHCRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
     4307            rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
    64124308            if (rcStrict == VINF_SUCCESS)
    64134309            {
    6414                 rcStrict = vmxHCRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
     4310                rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
    64154311                if (rcStrict == VINF_SUCCESS)
    6416                     rcStrict = vmxHCRealModeGuestStackPush(pVCpu, uGuestIp);
     4312                    rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
    64174313            }
    64184314
     
    66754571{
    66764572    HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    6677 #ifdef IN_RING0
     4573#ifndef IN_NEM_DARWIN
    66784574    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    66794575#endif
     
    67954691
    67964692
    6797 #ifdef IN_RING0
    6798 /**
    6799  * Exports the guest state into the VMCS guest-state area.
    6800  *
     6801  * This will typically be done before VM-entry when the guest-CPU state and the
    6802  * VMCS state may potentially be out of sync.
    6803  *
    6804  * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
    6805  * VM-entry controls.
    6806  * Sets up the appropriate VMX non-root function to execute guest code based on
    6807  * the guest CPU mode.
    6808  *
    6809  * @returns VBox strict status code.
    6810  * @retval  VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
    6811  *          without unrestricted guest execution and the VMMDev is not presently
    6812  *          mapped (e.g. EFI32).
    6813  *
    6814  * @param   pVCpu           The cross context virtual CPU structure.
    6815  * @param   pVmxTransient   The VMX-transient structure.
    6816  *
    6817  * @remarks No-long-jump zone!!!
    6818  */
    6819 static VBOXSTRICTRC vmxHCExportGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    6820 {
    6821     AssertPtr(pVCpu);
    6822     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    6823     LogFlowFunc(("pVCpu=%p\n", pVCpu));
    6824 
    6825     STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExportGuestState, x);
    6826 
    6827     /*
    6828      * Determine real-on-v86 mode.
    6829      * Used when the guest is in real-mode and unrestricted guest execution is not used.
    6830      */
    6831     PVMXVMCSINFOSHARED pVmcsInfoShared = pVmxTransient->pVmcsInfo->pShared;
    6832     if (    pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest
    6833         || !CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
    6834         pVmcsInfoShared->RealMode.fRealOnV86Active = false;
    6835     else
    6836     {
    6837         Assert(!pVmxTransient->fIsNestedGuest);
    6838         pVmcsInfoShared->RealMode.fRealOnV86Active = true;
    6839     }
    6840 
    6841     /*
    6842      * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
    6843      * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
    6844      */
    6845     int rc = vmxHCExportGuestEntryExitCtls(pVCpu, pVmxTransient);
    6846     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6847 
    6848     rc = vmxHCExportGuestCR0(pVCpu, pVmxTransient);
    6849     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6850 
    6851     VBOXSTRICTRC rcStrict = vmxHCExportGuestCR3AndCR4(pVCpu, pVmxTransient);
    6852     if (rcStrict == VINF_SUCCESS)
    6853     { /* likely */ }
    6854     else
    6855     {
    6856         Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
    6857         return rcStrict;
    6858     }
    6859 
    6860     rc = vmxHCExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
    6861     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6862 
    6863     rc = vmxHCExportGuestMsrs(pVCpu, pVmxTransient);
    6864     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6865 
    6866     vmxHCExportGuestApicTpr(pVCpu, pVmxTransient);
    6867     vmxHCExportGuestXcptIntercepts(pVCpu, pVmxTransient);
    6868     vmxHCExportGuestRip(pVCpu);
    6869     vmxHCExportGuestRsp(pVCpu);
    6870     vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    6871 
    6872     rc = vmxHCExportGuestHwvirtState(pVCpu, pVmxTransient);
    6873     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6874 
     6875     /* Clear any bits that may be set but are exported unconditionally, as well as unused/reserved bits. */
    6876     ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~(  (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
    6877                                                   |  HM_CHANGED_GUEST_CR2
    6878                                                   | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
    6879                                                   |  HM_CHANGED_GUEST_X87
    6880                                                   |  HM_CHANGED_GUEST_SSE_AVX
    6881                                                   |  HM_CHANGED_GUEST_OTHER_XSAVE
    6882                                                   |  HM_CHANGED_GUEST_XCRx
    6883                                                   |  HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
    6884                                                   |  HM_CHANGED_GUEST_SYSCALL_MSRS   /* Part of lazy or auto load-store MSRs. */
    6885                                                   |  HM_CHANGED_GUEST_TSC_AUX
    6886                                                   |  HM_CHANGED_GUEST_OTHER_MSRS
    6887                                                   | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
    6888 
    6889     STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExportGuestState, x);
    6890     return rc;
    6891 }
    6892 
    6893 
    6894 /**
    6895  * Exports the state shared between the host and guest into the VMCS.
    6896  *
    6897  * @param   pVCpu           The cross context virtual CPU structure.
    6898  * @param   pVmxTransient   The VMX-transient structure.
    6899  *
    6900  * @remarks No-long-jump zone!!!
    6901  */
    6902 static void vmxHCExportSharedState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    6903 {
    6904     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    6905     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    6906 
    6907     if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
    6908     {
    6909         int rc = vmxHCExportSharedDebugState(pVCpu, pVmxTransient);
    6910         AssertRC(rc);
    6911         VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
    6912 
    6913         /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
    6914         if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
    6915             vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    6916     }
    6917 
    6918     if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
    6919     {
    6920         vmxHCLazyLoadGuestMsrs(pVCpu);
    6921         VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
    6922     }
    6923 
    6924     AssertMsg(!(VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
    6925               ("fCtxChanged=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).fCtxChanged));
    6926 }
    6927 
    6928 
    6929 /**
    6930  * Worker for loading the guest-state bits in the inner VT-x execution loop.
    6931  *
    6932  * @returns Strict VBox status code (i.e. informational status codes too).
    6933  * @retval  VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
    6934  *          without unrestricted guest execution and the VMMDev is not presently
    6935  *          mapped (e.g. EFI32).
    6936  *
    6937  * @param   pVCpu           The cross context virtual CPU structure.
    6938  * @param   pVmxTransient   The VMX-transient structure.
    6939  *
    6940  * @remarks No-long-jump zone!!!
    6941  */
    6942 static VBOXSTRICTRC vmxHCExportGuestStateOptimal(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    6943 {
    6944     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    6945     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    6946 
    6947 #ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
    6948     ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    6949 #endif
    6950 
    6951     /*
    6952      * For many VM-exits only RIP/RSP/RFLAGS (and HWVIRT state when executing a nested-guest)
     6953      * change. First try to export only these without going through all other changed-flag checks.
    6954      */
    6955     VBOXSTRICTRC   rcStrict;
    6956     uint64_t const fCtxMask     = HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE;
    6957     uint64_t const fMinimalMask = HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT;
    6958     uint64_t const fCtxChanged  = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged);
    6959 
     6960     /* If only RIP/RSP/RFLAGS/HWVIRT changed, export only those (quicker, happens more often). */
    6961     if (    (fCtxChanged & fMinimalMask)
    6962         && !(fCtxChanged & (fCtxMask & ~fMinimalMask)))
    6963     {
    6964         vmxHCExportGuestRip(pVCpu);
    6965         vmxHCExportGuestRsp(pVCpu);
    6966         vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    6967         rcStrict = vmxHCExportGuestHwvirtState(pVCpu, pVmxTransient);
    6968         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExportMinimal);
    6969     }
    6970     /* If anything else also changed, go through the full export routine and export as required. */
    6971     else if (fCtxChanged & fCtxMask)
    6972     {
    6973         rcStrict = vmxHCExportGuestState(pVCpu, pVmxTransient);
    6974         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     6975         { /* likely */ }
    6976         else
    6977         {
    6978             AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("Failed to export guest state! rc=%Rrc\n",
    6979                                                            VBOXSTRICTRC_VAL(rcStrict)));
    6980             Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    6981             return rcStrict;
    6982         }
    6983         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExportFull);
    6984     }
    6985     /* Nothing changed, nothing to load here. */
    6986     else
    6987         rcStrict = VINF_SUCCESS;
    6988 
    6989 #ifdef VBOX_STRICT
    6990     /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
    6991     uint64_t const fCtxChangedCur = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged);
    6992     AssertMsg(!(fCtxChangedCur & fCtxMask), ("fCtxChangedCur=%#RX64\n", fCtxChangedCur));
    6993 #endif
    6994     return rcStrict;
    6995 }
    6996 #endif
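/*
 * Illustrative sketch, not part of the source: the fast-path test in
 * vmxHCExportGuestStateOptimal boils down to two bitmask checks (the flag
 * values here are purely for demonstration):
 *
 *     bool const fTakeFastPath =    (fCtxChanged & fMinimalMask)                 // something cheap changed...
 *                                && !(fCtxChanged & (fCtxMask & ~fMinimalMask)); // ...and nothing expensive did.
 *
 * If only HM_CHANGED_GUEST_RIP is dirty, the first test is non-zero and the
 * second is zero, so just the RIP/RSP/RFLAGS/HWVIRT exporters run; a single
 * dirty bit outside fMinimalMask forces the full vmxHCExportGuestState path.
 */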
    6997 
    6998 
    69994693/**
     70004694 * Tries to determine what part of the guest-state VT-x has deemed invalid
     
    70214715    uint32_t uError = VMX_IGS_ERROR;
    70224716    uint32_t u32IntrState = 0;
    7023 #ifdef IN_RING0
     4717#ifndef IN_NEM_DARWIN
    70244718    PVMCC    pVM    = pVCpu->CTX_SUFF(pVM);
    70254719    bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
     
    71524846            HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
    71534847
    7154 #ifdef IN_RING0
     4848#ifndef IN_NEM_DARWIN
    71554849        rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
    71564850        AssertRC(rc);
     
    75165210        }
    75175211
    7518 #ifdef IN_RING0
     5212#ifndef IN_NEM_DARWIN
    75195213        /* VMCS link pointer. */
    75205214        rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
     
    75715265#undef HMVMX_CHECK_BREAK
    75725266}
    7573 
    7574 
    7575 #ifdef IN_RING0
    7576 /**
    7577  * Map the APIC-access page for virtualizing APIC accesses.
    7578  *
     7579  * This can cause a longjump to R3 due to the acquisition of the PGM lock. Hence,
     7580  * this is not done as part of exporting guest state, see @bugref{8721}.
    7581  *
    7582  * @returns VBox status code.
    7583  * @param   pVCpu   The cross context virtual CPU structure.
    7584  */
    7585 static int vmxHCMapHCApicAccessPage(PVMCPUCC pVCpu)
    7586 {
    7587     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    7588     uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
    7589 
    7590     Assert(PDMHasApic(pVM));
    7591     Assert(u64MsrApicBase);
    7592 
    7593     RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
     7594     Log4Func(("Mapping HC APIC-access page at %#RGp\n", GCPhysApicBase));
    7595 
    7596     /* Unalias the existing mapping. */
    7597     int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
    7598     AssertRCReturn(rc, rc);
    7599 
    7600     /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
    7601     Assert(pVM->hmr0.s.vmx.HCPhysApicAccess != NIL_RTHCPHYS);
    7602     rc = IOMR0MmioMapMmioHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hmr0.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
    7603     AssertRCReturn(rc, rc);
    7604 
    7605     /* Update the per-VCPU cache of the APIC base MSR. */
    7606     VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase = u64MsrApicBase;
    7607     return VINF_SUCCESS;
    7608 }
    7609 
    7610 
    7611 /**
    7612  * Worker function passed to RTMpOnSpecific() that is to be called on the target
    7613  * CPU.
    7614  *
    7615  * @param   idCpu       The ID for the CPU the function is called on.
    7616  * @param   pvUser1     Null, not used.
    7617  * @param   pvUser2     Null, not used.
    7618  */
    7619 static DECLCALLBACK(void) hmR0DispatchHostNmi(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    7620 {
    7621     RT_NOREF3(idCpu, pvUser1, pvUser2);
    7622     VMXDispatchHostNmi();
    7623 }
    7624 
    7625 
    7626 /**
     7627  * Dispatches an NMI on the host CPU that received it.
    7628  *
    7629  * @returns VBox status code.
    7630  * @param   pVCpu       The cross context virtual CPU structure.
    7631  * @param   pVmcsInfo   The VMCS info. object corresponding to the VMCS that was
    7632  *                      executing when receiving the host NMI in VMX non-root
    7633  *                      operation.
    7634  */
    7635 static int vmxHCExitHostNmi(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
    7636 {
    7637     RTCPUID const idCpu = pVmcsInfo->idHostCpuExec;
    7638     Assert(idCpu != NIL_RTCPUID);
    7639 
    7640     /*
    7641      * We don't want to delay dispatching the NMI any more than we have to. However,
    7642      * we have already chosen -not- to dispatch NMIs when interrupts were still disabled
    7643      * after executing guest or nested-guest code for the following reasons:
    7644      *
     7645      *   - We would need to perform VMREADs with interrupts disabled, which is orders of
    7646      *     magnitude worse when we run as a nested hypervisor without VMCS shadowing
    7647      *     supported by the host hypervisor.
    7648      *
    7649      *   - It affects the common VM-exit scenario and keeps interrupts disabled for a
    7650      *     longer period of time just for handling an edge case like host NMIs which do
    7651      *     not occur nearly as frequently as other VM-exits.
    7652      *
    7653      * Let's cover the most likely scenario first. Check if we are on the target CPU
    7654      * and dispatch the NMI right away. This should be much faster than calling into
    7655      * RTMpOnSpecific() machinery.
    7656      */
    7657     bool fDispatched = false;
    7658     RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    7659     if (idCpu == RTMpCpuId())
    7660     {
    7661         VMXDispatchHostNmi();
    7662         fDispatched = true;
    7663     }
    7664     ASMSetFlags(fEFlags);
    7665     if (fDispatched)
    7666     {
    7667         STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitHostNmiInGC);
    7668         return VINF_SUCCESS;
    7669     }
    7670 
    7671     /*
    7672      * RTMpOnSpecific() waits until the worker function has run on the target CPU. So
    7673      * there should be no race or recursion even if we are unlucky enough to be preempted
    7674      * (to the target CPU) without dispatching the host NMI above.
    7675      */
    7676     STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitHostNmiInGCIpi);
    7677     return RTMpOnSpecific(idCpu, &hmR0DispatchHostNmi, NULL /* pvUser1 */,  NULL /* pvUser2 */);
    7678 }
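/*
 * Illustrative sketch, not part of the source: the dispatch logic above is the
 * generic "run on a specific CPU, cheaply if we are already there" pattern:
 *
 *     RTCCUINTREG const fEFlags = ASMIntDisableFlags();  // pin ourselves to the current CPU
 *     bool const fOnTarget = (idCpu == RTMpCpuId());
 *     if (fOnTarget)
 *         VMXDispatchHostNmi();                          // fast path, no cross-CPU call
 *     ASMSetFlags(fEFlags);
 *     if (!fOnTarget)
 *         rc = RTMpOnSpecific(idCpu, &hmR0DispatchHostNmi, NULL, NULL); // slow path: synchronous IPI
 *
 * Disabling interrupts first ensures the CPU-id check and the dispatch cannot
 * be separated by a preemption to another CPU.
 */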
    7679 
    7680 
    7681 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    7682 /**
     7683  * Merges the guest with the nested-guest MSR bitmap in preparation for executing the
    7684  * nested-guest using hardware-assisted VMX.
    7685  *
    7686  * @param   pVCpu               The cross context virtual CPU structure.
    7687  * @param   pVmcsInfoNstGst     The nested-guest VMCS info. object.
    7688  * @param   pVmcsInfoGst        The guest VMCS info. object.
    7689  */
    7690 static void vmxHCMergeMsrBitmapNested(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst)
    7691 {
    7692     uint32_t const cbMsrBitmap    = X86_PAGE_4K_SIZE;
    7693     uint64_t       *pu64MsrBitmap = (uint64_t *)pVmcsInfoNstGst->pvMsrBitmap;
    7694     Assert(pu64MsrBitmap);
    7695 
    7696     /*
    7697      * We merge the guest MSR bitmap with the nested-guest MSR bitmap such that any
    7698      * MSR that is intercepted by the guest is also intercepted while executing the
    7699      * nested-guest using hardware-assisted VMX.
    7700      *
    7701      * Note! If the nested-guest is not using an MSR bitmap, every MSR must cause a
    7702      *       nested-guest VM-exit even if the outer guest is not intercepting some
    7703      *       MSRs. We cannot assume the caller has initialized the nested-guest
    7704      *       MSR bitmap in this case.
    7705      *
    7706      *       The nested hypervisor may also switch whether it uses MSR bitmaps for
     7707      *       each of its VM-entries, hence initializing it once per VM while setting
    7708      *       up the nested-guest VMCS is not sufficient.
    7709      */
    7710     PCVMXVVMCS const pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    7711     if (pVmcsNstGst->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    7712     {
    7713         uint64_t const *pu64MsrBitmapNstGst = (uint64_t const *)&pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap[0];
    7714         uint64_t const *pu64MsrBitmapGst    = (uint64_t const *)pVmcsInfoGst->pvMsrBitmap;
    7715         Assert(pu64MsrBitmapNstGst);
    7716         Assert(pu64MsrBitmapGst);
    7717 
    7718         /** @todo Detect and use EVEX.POR? */
    7719         uint32_t const cFrags = cbMsrBitmap / sizeof(uint64_t);
    7720         for (uint32_t i = 0; i < cFrags; i++)
    7721             pu64MsrBitmap[i] = pu64MsrBitmapNstGst[i] | pu64MsrBitmapGst[i];
    7722     }
    7723     else
    7724         ASMMemFill32(pu64MsrBitmap, cbMsrBitmap, UINT32_C(0xffffffff));
    7725 }
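/*
 * Illustrative note, not part of the source: in VMX MSR bitmaps a set bit means
 * "intercept", so OR-ing the two bitmaps yields the union of the intercepts.
 * With made-up word values:
 *
 *     guest word:        0x24    // guest intercepts MSRs 2 and 5
 *     nested-guest word: 0x84    // nested hypervisor intercepts MSRs 2 and 7
 *     merged word (OR):  0xa4    // MSRs 2, 5 and 7 all cause VM-exits
 *
 * The 0xffffffff fill in the no-bitmap case is the degenerate union: every MSR
 * access is intercepted.
 */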
    7726 
    7727 
    7728 /**
     7729  * Merges the guest VMCS into the nested-guest VMCS controls in preparation for
    7730  * hardware-assisted VMX execution of the nested-guest.
    7731  *
    7732  * For a guest, we don't modify these controls once we set up the VMCS and hence
    7733  * this function is never called.
    7734  *
     7735  * For nested-guests, since the nested hypervisor provides these controls on every
     7736  * nested-guest VM-entry and could potentially change them every time, we need to
     7737  * merge them before every nested-guest VM-entry.
    7738  *
    7739  * @returns VBox status code.
    7740  * @param   pVCpu   The cross context virtual CPU structure.
    7741  */
    7742 static int vmxHCMergeVmcsNested(PVMCPUCC pVCpu)
    7743 {
    7744     PVMCC const         pVM          = pVCpu->CTX_SUFF(pVM);
    7745     PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
    7746     PCVMXVVMCS const    pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    7747 
    7748     /*
    7749      * Merge the controls with the requirements of the guest VMCS.
    7750      *
    7751      * We do not need to validate the nested-guest VMX features specified in the nested-guest
    7752      * VMCS with the features supported by the physical CPU as it's already done by the
    7753      * VMLAUNCH/VMRESUME instruction emulation.
    7754      *
    7755      * This is because the VMX features exposed by CPUM (through CPUID/MSRs) to the guest are
    7756      * derived from the VMX features supported by the physical CPU.
    7757      */
    7758 
    7759     /* Pin-based VM-execution controls. */
    7760     uint32_t const u32PinCtls = pVmcsNstGst->u32PinCtls | pVmcsInfoGst->u32PinCtls;
    7761 
    7762     /* Processor-based VM-execution controls. */
    7763     uint32_t       u32ProcCtls = (pVmcsNstGst->u32ProcCtls  & ~VMX_PROC_CTLS_USE_IO_BITMAPS)
    7764                                | (pVmcsInfoGst->u32ProcCtls & ~(  VMX_PROC_CTLS_INT_WINDOW_EXIT
    7765                                                                 | VMX_PROC_CTLS_NMI_WINDOW_EXIT
    7766                                                                 | VMX_PROC_CTLS_MOV_DR_EXIT
    7767                                                                 | VMX_PROC_CTLS_USE_TPR_SHADOW
    7768                                                                 | VMX_PROC_CTLS_MONITOR_TRAP_FLAG));
    7769 
    7770     /* Secondary processor-based VM-execution controls. */
    7771     uint32_t const u32ProcCtls2 = (pVmcsNstGst->u32ProcCtls2  & ~VMX_PROC_CTLS2_VPID)
    7772                                 | (pVmcsInfoGst->u32ProcCtls2 & ~(  VMX_PROC_CTLS2_VIRT_APIC_ACCESS
    7773                                                                   | VMX_PROC_CTLS2_INVPCID
    7774                                                                   | VMX_PROC_CTLS2_VMCS_SHADOWING
    7775                                                                   | VMX_PROC_CTLS2_RDTSCP
    7776                                                                   | VMX_PROC_CTLS2_XSAVES_XRSTORS
    7777                                                                   | VMX_PROC_CTLS2_APIC_REG_VIRT
    7778                                                                   | VMX_PROC_CTLS2_VIRT_INT_DELIVERY
    7779                                                                   | VMX_PROC_CTLS2_VMFUNC));
    7780 
    7781     /*
    7782      * VM-entry controls:
     7783  * These controls contain state that depends on the nested-guest state (primarily
    7784      * EFER MSR) and is thus not constant between VMLAUNCH/VMRESUME and the nested-guest
    7785      * VM-exit. Although the nested hypervisor cannot change it, we need to in order to
    7786      * properly continue executing the nested-guest if the EFER MSR changes but does not
     7787  * cause a nested-guest VM-exit.
    7788      *
    7789      * VM-exit controls:
    7790      * These controls specify the host state on return. We cannot use the controls from
     7791  * the nested hypervisor state as-is, as they would contain the guest state rather than
    7792      * the host state. Since the host state is subject to change (e.g. preemption, trips
    7793      * to ring-3, longjmp and rescheduling to a different host CPU) they are not constant
    7794      * through VMLAUNCH/VMRESUME and the nested-guest VM-exit.
    7795      *
    7796      * VM-entry MSR-load:
    7797      * The guest MSRs from the VM-entry MSR-load area are already loaded into the guest-CPU
    7798      * context by the VMLAUNCH/VMRESUME instruction emulation.
    7799      *
    7800      * VM-exit MSR-store:
    7801      * The VM-exit emulation will take care of populating the MSRs from the guest-CPU context
    7802      * back into the VM-exit MSR-store area.
    7803      *
    7804      * VM-exit MSR-load areas:
    7805      * This must contain the real host MSRs with hardware-assisted VMX execution. Hence, we
    7806      * can entirely ignore what the nested hypervisor wants to load here.
    7807      */
    7808 
    7809     /*
    7810      * Exception bitmap.
    7811      *
    7812      * We could remove #UD from the guest bitmap and merge it with the nested-guest bitmap
    7813      * here (and avoid doing anything while exporting nested-guest state), but to keep the
     7814  * code more flexible if intercepting exceptions becomes more dynamic in the future, we do
    7815      * it as part of exporting the nested-guest state.
    7816      */
    7817     uint32_t const u32XcptBitmap = pVmcsNstGst->u32XcptBitmap | pVmcsInfoGst->u32XcptBitmap;
    7818 
    7819     /*
    7820      * CR0/CR4 guest/host mask.
    7821      *
    7822      * Modifications by the nested-guest to CR0/CR4 bits owned by the host and the guest must
    7823      * cause VM-exits, so we need to merge them here.
    7824      */
    7825     uint64_t const u64Cr0Mask = pVmcsNstGst->u64Cr0Mask.u | pVmcsInfoGst->u64Cr0Mask;
    7826     uint64_t const u64Cr4Mask = pVmcsNstGst->u64Cr4Mask.u | pVmcsInfoGst->u64Cr4Mask;
    7827 
    7828     /*
    7829      * Page-fault error-code mask and match.
    7830      *
    7831      * Although we require unrestricted guest execution (and thereby nested-paging) for
    7832      * hardware-assisted VMX execution of nested-guests and thus the outer guest doesn't
    7833      * normally intercept #PFs, it might intercept them for debugging purposes.
    7834      *
    7835      * If the outer guest is not intercepting #PFs, we can use the nested-guest #PF filters.
    7836      * If the outer guest is intercepting #PFs, we must intercept all #PFs.
    7837      */
    7838     uint32_t u32XcptPFMask;
    7839     uint32_t u32XcptPFMatch;
    7840     if (!(pVmcsInfoGst->u32XcptBitmap & RT_BIT(X86_XCPT_PF)))
    7841     {
    7842         u32XcptPFMask  = pVmcsNstGst->u32XcptPFMask;
    7843         u32XcptPFMatch = pVmcsNstGst->u32XcptPFMatch;
    7844     }
    7845     else
    7846     {
    7847         u32XcptPFMask  = 0;
    7848         u32XcptPFMatch = 0;
    7849     }
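    /*
     * Illustrative note, not part of the source: with the #PF bit set in the
     * exception bitmap, hardware reports a VM-exit iff
     *     (uPfErrorCode & u32XcptPFMask) == u32XcptPFMatch
     * so mask = match = 0 makes the comparison (x & 0) == 0 hold for every
     * error code, i.e. all #PFs cause VM-exits -- exactly what is needed when
     * the outer guest intercepts #PFs itself.
     */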
    7850 
    7851     /*
    7852      * Pause-Loop exiting.
    7853      */
    7854     /** @todo r=bird: given that both pVM->hm.s.vmx.cPleGapTicks and
    7855      *        pVM->hm.s.vmx.cPleWindowTicks defaults to zero, I cannot see how
    7856      *        this will work... */
    7857     uint32_t const cPleGapTicks    = RT_MIN(pVM->hm.s.vmx.cPleGapTicks,    pVmcsNstGst->u32PleGap);
    7858     uint32_t const cPleWindowTicks = RT_MIN(pVM->hm.s.vmx.cPleWindowTicks, pVmcsNstGst->u32PleWindow);
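    /*
     * Illustrative note, not part of the source: RT_MIN picks the smaller of
     * the two values, so with the host default of zero for cPleGapTicks,
     *     RT_MIN(0, pVmcsNstGst->u32PleGap) == 0
     * and the merged gap/window collapse to zero whatever the nested hypervisor
     * asked for -- which is the concern raised in the todo above.
     */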
    7859 
    7860     /*
    7861      * Pending debug exceptions.
    7862      * Currently just copy whatever the nested-guest provides us.
    7863      */
    7864     uint64_t const uPendingDbgXcpts = pVmcsNstGst->u64GuestPendingDbgXcpts.u;
    7865 
    7866     /*
    7867      * I/O Bitmap.
    7868      *
    7869      * We do not use the I/O bitmap that may be provided by the nested hypervisor as we always
    7870      * intercept all I/O port accesses.
    7871      */
    7872     Assert(u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
    7873     Assert(!(u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS));
    7874 
    7875     /*
    7876      * VMCS shadowing.
    7877      *
    7878      * We do not yet expose VMCS shadowing to the guest and thus VMCS shadowing should not be
    7879      * enabled while executing the nested-guest.
    7880      */
    7881     Assert(!(u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING));
    7882 
    7883     /*
    7884      * APIC-access page.
    7885      */
    7886     RTHCPHYS HCPhysApicAccess;
    7887     if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    7888     {
    7889         Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
    7890         RTGCPHYS const GCPhysApicAccess = pVmcsNstGst->u64AddrApicAccess.u;
    7891 
    7892         /** @todo NSTVMX: This is not really correct but currently is required to make
    7893          *        things work. We need to re-enable the page handler when we fallback to
    7894          *        IEM execution of the nested-guest! */
    7895         PGMHandlerPhysicalPageTempOff(pVM, GCPhysApicAccess, GCPhysApicAccess);
    7896 
    7897         void          *pvPage;
    7898         PGMPAGEMAPLOCK PgLockApicAccess;
    7899         int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysApicAccess, &pvPage, &PgLockApicAccess);
    7900         if (RT_SUCCESS(rc))
    7901         {
    7902             rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysApicAccess, &HCPhysApicAccess);
    7903             AssertMsgRCReturn(rc, ("Failed to get host-physical address for APIC-access page at %#RGp\n", GCPhysApicAccess), rc);
    7904 
    7905             /** @todo Handle proper releasing of page-mapping lock later. */
    7906             PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &PgLockApicAccess);
    7907         }
    7908         else
    7909             return rc;
    7910     }
    7911     else
    7912         HCPhysApicAccess = 0;
    7913 
    7914     /*
    7915      * Virtual-APIC page and TPR threshold.
    7916      */
    7917     RTHCPHYS HCPhysVirtApic;
    7918     uint32_t u32TprThreshold;
    7919     if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    7920     {
    7921         Assert(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW);
    7922         RTGCPHYS const GCPhysVirtApic = pVmcsNstGst->u64AddrVirtApic.u;
    7923 
    7924         void          *pvPage;
    7925         PGMPAGEMAPLOCK PgLockVirtApic;
    7926         int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysVirtApic, &pvPage, &PgLockVirtApic);
    7927         if (RT_SUCCESS(rc))
    7928         {
    7929             rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysVirtApic, &HCPhysVirtApic);
    7930             AssertMsgRCReturn(rc, ("Failed to get host-physical address for virtual-APIC page at %#RGp\n", GCPhysVirtApic), rc);
    7931 
    7932             /** @todo Handle proper releasing of page-mapping lock later. */
    7933             PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &PgLockVirtApic);
    7934         }
    7935         else
    7936             return rc;
    7937 
    7938         u32TprThreshold = pVmcsNstGst->u32TprThreshold;
    7939     }
    7940     else
    7941     {
    7942         HCPhysVirtApic  = 0;
    7943         u32TprThreshold = 0;
    7944 
    7945         /*
     7946          * We must make sure CR8 reads/writes cause VM-exits when TPR shadowing is not
    7947          * used by the nested hypervisor. Preventing MMIO accesses to the physical APIC will
    7948          * be taken care of by EPT/shadow paging.
    7949          */
    7950         if (pVM->hmr0.s.fAllow64BitGuests)
    7951             u32ProcCtls |= VMX_PROC_CTLS_CR8_STORE_EXIT
    7952                         |  VMX_PROC_CTLS_CR8_LOAD_EXIT;
    7953     }
    7954 
    7955     /*
    7956      * Validate basic assumptions.
    7957      */
    7958     PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
    7959     Assert(VM_IS_VMX_UNRESTRICTED_GUEST(pVM));
    7960     Assert(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
    7961     Assert(hmGetVmxActiveVmcsInfo(pVCpu) == pVmcsInfoNstGst);
    7962 
    7963     /*
    7964      * Commit it to the nested-guest VMCS.
    7965      */
    7966     int rc = VINF_SUCCESS;
    7967     if (pVmcsInfoNstGst->u32PinCtls != u32PinCtls)
    7968         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, u32PinCtls);
    7969     if (pVmcsInfoNstGst->u32ProcCtls != u32ProcCtls)
    7970         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, u32ProcCtls);
    7971     if (pVmcsInfoNstGst->u32ProcCtls2 != u32ProcCtls2)
    7972         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, u32ProcCtls2);
    7973     if (pVmcsInfoNstGst->u32XcptBitmap != u32XcptBitmap)
    7974         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
    7975     if (pVmcsInfoNstGst->u64Cr0Mask != u64Cr0Mask)
    7976         rc |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
    7977     if (pVmcsInfoNstGst->u64Cr4Mask != u64Cr4Mask)
    7978         rc |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
    7979     if (pVmcsInfoNstGst->u32XcptPFMask != u32XcptPFMask)
    7980         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, u32XcptPFMask);
    7981     if (pVmcsInfoNstGst->u32XcptPFMatch != u32XcptPFMatch)
    7982         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, u32XcptPFMatch);
    7983     if (   !(u32ProcCtls  & VMX_PROC_CTLS_PAUSE_EXIT)
    7984         &&  (u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
    7985     {
    7986         Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
    7987         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_GAP, cPleGapTicks);
    7988         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_WINDOW, cPleWindowTicks);
    7989     }
    7990     if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    7991     {
    7992         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
    7993         rc |= VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
    7994     }
    7995     if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    7996         rc |= VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
    7997     rc |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, uPendingDbgXcpts);
    7998     AssertRC(rc);
    7999 
    8000     /*
    8001      * Update the nested-guest VMCS cache.
    8002      */
    8003     pVmcsInfoNstGst->u32PinCtls     = u32PinCtls;
    8004     pVmcsInfoNstGst->u32ProcCtls    = u32ProcCtls;
    8005     pVmcsInfoNstGst->u32ProcCtls2   = u32ProcCtls2;
    8006     pVmcsInfoNstGst->u32XcptBitmap  = u32XcptBitmap;
    8007     pVmcsInfoNstGst->u64Cr0Mask     = u64Cr0Mask;
    8008     pVmcsInfoNstGst->u64Cr4Mask     = u64Cr4Mask;
    8009     pVmcsInfoNstGst->u32XcptPFMask  = u32XcptPFMask;
    8010     pVmcsInfoNstGst->u32XcptPFMatch = u32XcptPFMatch;
    8011     pVmcsInfoNstGst->HCPhysVirtApic = HCPhysVirtApic;
    8012 
    8013     /*
    8014      * We need to flush the TLB if we are switching the APIC-access page address.
    8015      * See Intel spec. 28.3.3.4 "Guidelines for Use of the INVEPT Instruction".
    8016      */
    8017     if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    8018         VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedNstGstFlushTlb = true;
    8019 
    8020     /*
    8021      * MSR bitmap.
    8022      *
    8023      * The MSR bitmap address has already been initialized while setting up the nested-guest
    8024      * VMCS, here we need to merge the MSR bitmaps.
    8025      */
    8026     if (u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    8027         vmxHCMergeMsrBitmapNested(pVCpu, pVmcsInfoNstGst, pVmcsInfoGst);
    8028 
    8029     return VINF_SUCCESS;
    8030 }
    8031 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    8032 
    8033 
    8034 /**
    8035  * Runs the guest code using hardware-assisted VMX the normal way.
    8036  *
    8037  * @returns VBox status code.
    8038  * @param   pVCpu       The cross context virtual CPU structure.
    8039  * @param   pcLoops     Pointer to the number of executed loops.
    8040  */
    8041 static VBOXSTRICTRC vmxHCRunGuestCodeNormal(PVMCPUCC pVCpu, uint32_t *pcLoops)
    8042 {
    8043     uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
    8044     Assert(pcLoops);
    8045     Assert(*pcLoops <= cMaxResumeLoops);
    8046     Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
    8047 
    8048 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    8049     /*
    8050      * Switch to the guest VMCS as we may have transitioned from executing the nested-guest
    8051      * without leaving ring-0. Otherwise, if we came from ring-3 we would have loaded the
    8052      * guest VMCS while entering the VMX ring-0 session.
    8053      */
    8054     if (pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
    8055     {
    8056         int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, false /* fSwitchToNstGstVmcs */);
    8057         if (RT_SUCCESS(rc))
    8058         { /* likely */ }
    8059         else
    8060         {
    8061             LogRelFunc(("Failed to switch to the guest VMCS. rc=%Rrc\n", rc));
    8062             return rc;
    8063         }
    8064     }
    8065 #endif
    8066 
    8067     VMXTRANSIENT VmxTransient;
    8068     RT_ZERO(VmxTransient);
    8069     VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    8070 
    8071     /* Paranoia. */
    8072     Assert(VmxTransient.pVmcsInfo == &pVCpu->hmr0.s.vmx.VmcsInfo);
    8073 
    8074     VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
    8075     for (;;)
    8076     {
    8077         Assert(!HMR0SuspendPending());
    8078         HMVMX_ASSERT_CPU_SAFE(pVCpu);
    8079         STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    8080 
    8081         /*
     8082          * Preparatory work for running guest code, which may force us to
    8083          * return to ring-3.
    8084          *
    8085          * Warning! This bugger disables interrupts on VINF_SUCCESS!
    8086          */
    8087         rcStrict = vmxHCPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
    8088         if (rcStrict != VINF_SUCCESS)
    8089             break;
    8090 
    8091         /* Interrupts are disabled at this point! */
    8092         vmxHCPreRunGuestCommitted(pVCpu, &VmxTransient);
    8093         int rcRun = vmxHCRunGuest(pVCpu, &VmxTransient);
    8094         vmxHCPostRunGuest(pVCpu, &VmxTransient, rcRun);
    8095         /* Interrupts are re-enabled at this point! */
    8096 
    8097         /*
    8098          * Check for errors with running the VM (VMLAUNCH/VMRESUME).
    8099          */
    8100         if (RT_SUCCESS(rcRun))
    8101         { /* very likely */ }
    8102         else
    8103         {
    8104             STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, x);
    8105             vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    8106             return rcRun;
    8107         }
    8108 
    8109         /*
    8110          * Profile the VM-exit.
    8111          */
    8112         AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    8113         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitAll);
    8114         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    8115         STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, &VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    8116         HMVMX_START_EXIT_DISPATCH_PROF();
    8117 
    8118         VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
    8119 
    8120         /*
    8121          * Handle the VM-exit.
    8122          */
    8123 #ifdef HMVMX_USE_FUNCTION_TABLE
    8124         rcStrict = g_aVMExitHandlers[VmxTransient.uExitReason].pfn(pVCpu, &VmxTransient);
    8125 #else
    8126         rcStrict = vmxHCHandleExit(pVCpu, &VmxTransient);
    8127 #endif
    8128         STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    8129         if (rcStrict == VINF_SUCCESS)
    8130         {
    8131             if (++(*pcLoops) <= cMaxResumeLoops)
    8132                 continue;
    8133             STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchMaxResumeLoops);
    8134             rcStrict = VINF_EM_RAW_INTERRUPT;
    8135         }
    8136         break;
    8137     }
    8138 
    8139     STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    8140     return rcStrict;
    8141 }
    8142 
    8143 
    8144 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    8145 /**
    8146  * Runs the nested-guest code using hardware-assisted VMX.
    8147  *
    8148  * @returns VBox status code.
    8149  * @param   pVCpu       The cross context virtual CPU structure.
    8150  * @param   pcLoops     Pointer to the number of executed loops.
    8151  *
    8152  * @sa      vmxHCRunGuestCodeNormal.
    8153  */
    8154 static VBOXSTRICTRC vmxHCRunGuestCodeNested(PVMCPUCC pVCpu, uint32_t *pcLoops)
    8155 {
    8156     uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
    8157     Assert(pcLoops);
    8158     Assert(*pcLoops <= cMaxResumeLoops);
    8159     Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
    8160 
    8161     /*
    8162      * Switch to the nested-guest VMCS as we may have transitioned from executing the
    8163      * guest without leaving ring-0. Otherwise, if we came from ring-3 we would have
    8164      * loaded the nested-guest VMCS while entering the VMX ring-0 session.
    8165      */
    8166     if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
    8167     {
    8168         int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
    8169         if (RT_SUCCESS(rc))
    8170         { /* likely */ }
    8171         else
    8172         {
    8173             LogRelFunc(("Failed to switch to the nested-guest VMCS. rc=%Rrc\n", rc));
    8174             return rc;
    8175         }
    8176     }
    8177 
    8178     VMXTRANSIENT VmxTransient;
    8179     RT_ZERO(VmxTransient);
    8180     VmxTransient.pVmcsInfo      = hmGetVmxActiveVmcsInfo(pVCpu);
    8181     VmxTransient.fIsNestedGuest = true;
    8182 
    8183     /* Paranoia. */
    8184     Assert(VmxTransient.pVmcsInfo == &pVCpu->hmr0.s.vmx.VmcsInfoNstGst);
    8185 
    8186     VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
    8187     for (;;)
    8188     {
    8189         Assert(!HMR0SuspendPending());
    8190         HMVMX_ASSERT_CPU_SAFE(pVCpu);
    8191         STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    8192 
    8193         /*
     8194          * Preparatory work for running nested-guest code, which may force us to
    8195          * return to ring-3.
    8196          *
    8197          * Warning! This bugger disables interrupts on VINF_SUCCESS!
    8198          */
    8199         rcStrict = vmxHCPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
    8200         if (rcStrict != VINF_SUCCESS)
    8201             break;
    8202 
    8203         /* Interrupts are disabled at this point! */
    8204         vmxHCPreRunGuestCommitted(pVCpu, &VmxTransient);
    8205         int rcRun = vmxHCRunGuest(pVCpu, &VmxTransient);
    8206         vmxHCPostRunGuest(pVCpu, &VmxTransient, rcRun);
    8207         /* Interrupts are re-enabled at this point! */
    8208 
    8209         /*
    8210          * Check for errors with running the VM (VMLAUNCH/VMRESUME).
    8211          */
    8212         if (RT_SUCCESS(rcRun))
    8213         { /* very likely */ }
    8214         else
    8215         {
    8216             STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, x);
    8217             vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    8218             return rcRun;
    8219         }
    8220 
    8221         /*
    8222          * Profile the VM-exit.
    8223          */
    8224         AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    8225         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitAll);
    8226         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatNestedExitAll);
    8227         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatNestedExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    8228         STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, &VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    8229         HMVMX_START_EXIT_DISPATCH_PROF();
    8230 
    8231         VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
    8232 
    8233         /*
    8234          * Handle the VM-exit.
    8235          */
    8236         rcStrict = vmxHCHandleExitNested(pVCpu, &VmxTransient);
    8237         STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    8238         if (rcStrict == VINF_SUCCESS)
    8239         {
    8240             if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    8241             {
    8242                 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchNstGstVmexit);
    8243                 rcStrict = VINF_VMX_VMEXIT;
    8244             }
    8245             else
    8246             {
    8247                 if (++(*pcLoops) <= cMaxResumeLoops)
    8248                     continue;
    8249                 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchMaxResumeLoops);
    8250                 rcStrict = VINF_EM_RAW_INTERRUPT;
    8251             }
    8252         }
    8253         else
    8254             Assert(rcStrict != VINF_VMX_VMEXIT);
    8255         break;
    8256     }
    8257 
    8258     STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    8259     return rcStrict;
    8260 }
    8261 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    8262 
    8263 
    8264 /** @name Execution loop for single stepping, DBGF events and expensive Dtrace
    8265  *  probes.
    8266  *
     8267  * The following few functions and associated structure contain the bloat
    8268  * necessary for providing detailed debug events and dtrace probes as well as
    8269  * reliable host side single stepping.  This works on the principle of
    8270  * "subclassing" the normal execution loop and workers.  We replace the loop
    8271  * method completely and override selected helpers to add necessary adjustments
    8272  * to their core operation.
    8273  *
    8274  * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
    8275  * any performance for debug and analysis features.
    8276  *
    8277  * @{
    8278  */
    8279 
    8280 /**
     8281  * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
    8282  * the debug run loop.
    8283  */
    8284 typedef struct VMXRUNDBGSTATE
    8285 {
    8286     /** The RIP we started executing at.  This is for detecting that we stepped.  */
    8287     uint64_t    uRipStart;
    8288     /** The CS we started executing with.  */
    8289     uint16_t    uCsStart;
    8290 
    8291     /** Whether we've actually modified the 1st execution control field. */
    8292     bool        fModifiedProcCtls : 1;
    8293     /** Whether we've actually modified the 2nd execution control field. */
    8294     bool        fModifiedProcCtls2 : 1;
    8295     /** Whether we've actually modified the exception bitmap. */
    8296     bool        fModifiedXcptBitmap : 1;
    8297 
     8298     /** We desire the CR0 mask to be cleared. */
    8299     bool        fClearCr0Mask : 1;
     8300     /** We desire the CR4 mask to be cleared. */
    8301     bool        fClearCr4Mask : 1;
    8302     /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
    8303     uint32_t    fCpe1Extra;
    8304     /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
    8305     uint32_t    fCpe1Unwanted;
    8306     /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
    8307     uint32_t    fCpe2Extra;
    8308     /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
    8309     uint32_t    bmXcptExtra;
    8310     /** The sequence number of the Dtrace provider settings the state was
    8311      *  configured against. */
    8312     uint32_t    uDtraceSettingsSeqNo;
    8313     /** VM-exits to check (one bit per VM-exit). */
    8314     uint32_t    bmExitsToCheck[3];
    8315 
    8316     /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
    8317     uint32_t    fProcCtlsInitial;
    8318     /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
    8319     uint32_t    fProcCtls2Initial;
    8320     /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
    8321     uint32_t    bmXcptInitial;
    8322 } VMXRUNDBGSTATE;
    8323 AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
    8324 typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
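/*
 * Illustrative arithmetic, not part of the source: the compile-time assert
 * above sizes bmExitsToCheck as one bit per exit reason, rounded up to whole
 * 32-bit words. Taking VMX_EXIT_MAX = 68 purely as an example value:
 *
 *     (68 + 1 + 31) / 32 * 4  =  100 / 32 * 4  =  3 * 4  =  12 bytes
 *
 * i.e. three uint32_t words, matching bmExitsToCheck[3].
 */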
    8325 
    8326 
    8327 /**
    8328  * Initializes the VMXRUNDBGSTATE structure.
    8329  *
    8330  * @param   pVCpu           The cross context virtual CPU structure of the
    8331  *                          calling EMT.
    8332  * @param   pVmxTransient   The VMX-transient structure.
    8333  * @param   pDbgState       The debug state to initialize.
    8334  */
    8335 static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    8336 {
    8337     pDbgState->uRipStart            = pVCpu->cpum.GstCtx.rip;
    8338     pDbgState->uCsStart             = pVCpu->cpum.GstCtx.cs.Sel;
    8339 
    8340     pDbgState->fModifiedProcCtls    = false;
    8341     pDbgState->fModifiedProcCtls2   = false;
    8342     pDbgState->fModifiedXcptBitmap  = false;
    8343     pDbgState->fClearCr0Mask        = false;
    8344     pDbgState->fClearCr4Mask        = false;
    8345     pDbgState->fCpe1Extra           = 0;
    8346     pDbgState->fCpe1Unwanted        = 0;
    8347     pDbgState->fCpe2Extra           = 0;
    8348     pDbgState->bmXcptExtra          = 0;
    8349     pDbgState->fProcCtlsInitial     = pVmxTransient->pVmcsInfo->u32ProcCtls;
    8350     pDbgState->fProcCtls2Initial    = pVmxTransient->pVmcsInfo->u32ProcCtls2;
    8351     pDbgState->bmXcptInitial        = pVmxTransient->pVmcsInfo->u32XcptBitmap;
    8352 }
    8353 
    8354 
    8355 /**
     8356  * Updates the VMCS fields with changes requested by @a pDbgState.
    8357  *
    8358  * This is performed after vmxHCPreRunGuestDebugStateUpdate as well
    8359  * immediately before executing guest code, i.e. when interrupts are disabled.
    8360  * We don't check status codes here as we cannot easily assert or return in the
    8361  * latter case.
    8362  *
    8363  * @param   pVCpu           The cross context virtual CPU structure.
    8364  * @param   pVmxTransient   The VMX-transient structure.
    8365  * @param   pDbgState       The debug state.
    8366  */
    8367 static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    8368 {
    8369     /*
    8370      * Ensure desired flags in VMCS control fields are set.
    8371      * (Ignoring write failure here, as we're committed and it's just debug extras.)
    8372      *
    8373      * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
    8374      *       there should be no stale data in pCtx at this point.
    8375      */
    8376     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    8377     if (   (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
    8378         || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
    8379     {
    8380         pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
    8381         pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
    8382         VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    8383         Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
    8384         pDbgState->fModifiedProcCtls   = true;
    8385     }
    8386 
    8387     if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
    8388     {
    8389         pVmcsInfo->u32ProcCtls2  |= pDbgState->fCpe2Extra;
    8390         VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
    8391         Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
    8392         pDbgState->fModifiedProcCtls2  = true;
    8393     }
    8394 
    8395     if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
    8396     {
    8397         pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
    8398         VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
    8399         Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
    8400         pDbgState->fModifiedXcptBitmap = true;
    8401     }
    8402 
    8403     if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
    8404     {
    8405         pVmcsInfo->u64Cr0Mask = 0;
    8406         VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
    8407         Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
    8408     }
    8409 
    8410     if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
    8411     {
    8412         pVmcsInfo->u64Cr4Mask = 0;
    8413         VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
    8414         Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
    8415     }
    8416 
    8417     NOREF(pVCpu);
    8418 }
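
         /* Design note: pVmcsInfo caches the VMCS control fields, so each block
            above is a write-through update; a minimal sketch of the idiom, with
            fWanted standing in for any of the pDbgState masks:

                if ((pVmcsInfo->u32ProcCtls & fWanted) != fWanted)   // only touch hardware on a mismatch
                {
                    pVmcsInfo->u32ProcCtls |= fWanted;               // keep the cached copy in sync...
                    VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); // ...and the VMCS
                }

            This keeps redundant VMWRITEs off the hot path. */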
    8419 
    8420 
    8421 /**
    8422  * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
    8423  * re-entry next time around.
    8424  *
    8425  * @returns Strict VBox status code (i.e. informational status codes too).
    8426  * @param   pVCpu           The cross context virtual CPU structure.
    8427  * @param   pVmxTransient   The VMX-transient structure.
    8428  * @param   pDbgState       The debug state.
    8429  * @param   rcStrict        The return code from executing the guest using single
    8430  *                          stepping.
    8431  */
    8432 static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
    8433                                                VBOXSTRICTRC rcStrict)
    8434 {
    8435     /*
    8436      * Restore the VM-execution control settings as we may not re-enter this
    8437      * function the next time around.
    8438      */
    8439     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    8440 
    8441     /* We reload the initial value and trigger what recalculations we can the
    8442        next time around.  From the looks of things, that's all that's required atm. */
    8443     if (pDbgState->fModifiedProcCtls)
    8444     {
    8445         if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
    8446             pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in vmxHCLeave */
    8447         int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
    8448         AssertRC(rc2);
    8449         pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
    8450     }
    8451 
    8452     /* We're currently the only ones messing with this one, so just restore the
    8453        cached value and reload the field. */
    8454     if (   pDbgState->fModifiedProcCtls2
    8455         && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
    8456     {
    8457         int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
    8458         AssertRC(rc2);
    8459         pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
    8460     }
    8461 
    8462     /* If we've modified the exception bitmap, we restore it and trigger
    8463        reloading and partial recalculation the next time around. */
    8464     if (pDbgState->fModifiedXcptBitmap)
    8465         pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
    8466 
    8467     return rcStrict;
    8468 }
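
         /* Note: rcStrict is threaded through unchanged, so the debug loop can
            simply chain the call when bailing out, e.g.:

                rcStrict = vmxHCRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
         */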
    8469 
    8470 
    8471 /**
    8472  * Configures VM-exit controls for current DBGF and DTrace settings.
    8473  *
    8474  * This updates @a pDbgState and the VMCS execution control fields to reflect
    8475  * the necessary VM-exits demanded by DBGF and DTrace.
    8476  *
    8477  * @param   pVCpu           The cross context virtual CPU structure.
    8478  * @param   pVmxTransient   The VMX-transient structure. May update
    8479  *                          fUpdatedTscOffsettingAndPreemptTimer.
    8480  * @param   pDbgState       The debug state.
    8481  */
    8482 static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    8483 {
    8484     /*
    8485      * Take down the dtrace serial number so we can spot changes.
    8486      */
    8487     pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
    8488     ASMCompilerBarrier();
    8489 
    8490     /*
    8491      * We'll rebuild most of the middle block of data members (holding the
    8492      * current settings) as we go along here, so start by clearing it all.
    8493      */
    8494     pDbgState->bmXcptExtra      = 0;
    8495     pDbgState->fCpe1Extra       = 0;
    8496     pDbgState->fCpe1Unwanted    = 0;
    8497     pDbgState->fCpe2Extra       = 0;
    8498     for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
    8499         pDbgState->bmExitsToCheck[i] = 0;
    8500 
    8501     /*
    8502      * Software interrupts (INT XXh) - no idea how to trigger these...
    8503      */
    8504     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    8505     if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
    8506         || VBOXVMM_INT_SOFTWARE_ENABLED())
    8507     {
    8508         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
    8509     }
    8510 
    8511     /*
    8512      * INT3 breakpoints - triggered by #BP exceptions.
    8513      */
    8514     if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
    8515         pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
    8516 
    8517     /*
    8518      * Exception bitmap and XCPT events+probes.
    8519      */
    8520     for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
    8521         if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
    8522             pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
    8523 
    8524     if (VBOXVMM_XCPT_DE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
    8525     if (VBOXVMM_XCPT_DB_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
    8526     if (VBOXVMM_XCPT_BP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
    8527     if (VBOXVMM_XCPT_OF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
    8528     if (VBOXVMM_XCPT_BR_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
    8529     if (VBOXVMM_XCPT_UD_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
    8530     if (VBOXVMM_XCPT_NM_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
    8531     if (VBOXVMM_XCPT_DF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
    8532     if (VBOXVMM_XCPT_TS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
    8533     if (VBOXVMM_XCPT_NP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
    8534     if (VBOXVMM_XCPT_SS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
    8535     if (VBOXVMM_XCPT_GP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
    8536     if (VBOXVMM_XCPT_PF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
    8537     if (VBOXVMM_XCPT_MF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
    8538     if (VBOXVMM_XCPT_AC_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
    8539     if (VBOXVMM_XCPT_XF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
    8540     if (VBOXVMM_XCPT_VE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
    8541     if (VBOXVMM_XCPT_SX_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
    8542 
    8543     if (pDbgState->bmXcptExtra)
    8544         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
    8545 
    8546     /*
    8547      * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
    8548      *
    8549      * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
    8550      *       So, when adding/changing/removing please don't forget to update it.
    8551      *
    8552  * Some of the macros are picking up local variables to save horizontal space
    8553  * (being able to see it in a table is the lesser evil here).
    8554      */
    8555 #define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
    8556         (    DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
    8557          ||  RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
    8558 #define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
    8559         if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
    8560         {   AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
    8561             ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
    8562         } else do { } while (0)
    8563 #define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
    8564         if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
    8565         { \
    8566             (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
    8567             AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
    8568             ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
    8569         } else do { } while (0)
    8570 #define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
    8571         if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
    8572         { \
    8573             (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
    8574             AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
    8575             ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
    8576         } else do { } while (0)
    8577 #define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
    8578         if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
    8579         { \
    8580             (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
    8581             AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
    8582             ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
    8583         } else do { } while (0)
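
             /* Illustrative expansion (AssertCompile bounds check omitted):
                SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
                from the table below turns into:

                    if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
                        || VBOXVMM_INSTR_HALT_ENABLED())
                    {
                        pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;     // force HLT VM-exits...
                        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);  // ...and inspect them for events
                    }
             */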
    8584 
    8585     SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH,         VMX_EXIT_TASK_SWITCH);   /* unconditional */
    8586     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION,   VMX_EXIT_EPT_VIOLATION); /* unconditional */
    8587     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG,   VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
    8588     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS,    VMX_EXIT_APIC_ACCESS);   /* feature dependent, nothing to enable here */
    8589     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE,     VMX_EXIT_APIC_WRITE);    /* feature dependent, nothing to enable here */
    8590 
    8591     SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID,              VMX_EXIT_CPUID);         /* unconditional */
    8592     SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID,              VMX_EXIT_CPUID);
    8593     SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC,             VMX_EXIT_GETSEC);        /* unconditional */
    8594     SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC,             VMX_EXIT_GETSEC);
    8595     SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT,               VMX_EXIT_HLT,      VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
    8596     SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT,               VMX_EXIT_HLT);
    8597     SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD,               VMX_EXIT_INVD);          /* unconditional */
    8598     SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD,               VMX_EXIT_INVD);
    8599     SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG,             VMX_EXIT_INVLPG,   VMX_PROC_CTLS_INVLPG_EXIT);
    8600     SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG,             VMX_EXIT_INVLPG);
    8601     SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC,              VMX_EXIT_RDPMC,    VMX_PROC_CTLS_RDPMC_EXIT);
    8602     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC,              VMX_EXIT_RDPMC);
    8603     SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC,              VMX_EXIT_RDTSC,    VMX_PROC_CTLS_RDTSC_EXIT);
    8604     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC,              VMX_EXIT_RDTSC);
    8605     SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM,                VMX_EXIT_RSM);           /* unconditional */
    8606     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM,                VMX_EXIT_RSM);
    8607     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL,           VMX_EXIT_VMCALL);        /* unconditional */
    8608     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL,           VMX_EXIT_VMCALL);
    8609     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);       /* unconditional */
    8610     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);
    8611     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);      /* unconditional */
    8612     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);
    8613     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);       /* unconditional */
    8614     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);
    8615     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST,        VMX_EXIT_VMPTRST);       /* unconditional */
    8616     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST,        VMX_EXIT_VMPTRST);
    8617     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD,         VMX_EXIT_VMREAD);        /* unconditional */
    8618     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD,         VMX_EXIT_VMREAD);
    8619     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME,       VMX_EXIT_VMRESUME);      /* unconditional */
    8620     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME,       VMX_EXIT_VMRESUME);
    8621     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE,        VMX_EXIT_VMWRITE);       /* unconditional */
    8622     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE,        VMX_EXIT_VMWRITE);
    8623     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF,         VMX_EXIT_VMXOFF);        /* unconditional */
    8624     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF,         VMX_EXIT_VMXOFF);
    8625     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON,          VMX_EXIT_VMXON);         /* unconditional */
    8626     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON,          VMX_EXIT_VMXON);
    8627 
    8628     if (   IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
    8629         || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
    8630     {
    8631         int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
    8632                                                                         | CPUMCTX_EXTRN_APIC_TPR);
    8633         AssertRC(rc);
    8634 
    8635 #if 0 /** @todo fix me */
    8636         pDbgState->fClearCr0Mask = true;
    8637         pDbgState->fClearCr4Mask = true;
    8638 #endif
    8639         if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
    8640             pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
    8641         if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
    8642             pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
    8643         pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
    8644         /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT.  It would
    8645                  require clearing here and in the loop if we start using it. */
    8646         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
    8647     }
    8648     else
    8649     {
    8650         if (pDbgState->fClearCr0Mask)
    8651         {
    8652             pDbgState->fClearCr0Mask = false;
    8653             ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
    8654         }
    8655         if (pDbgState->fClearCr4Mask)
    8656         {
    8657             pDbgState->fClearCr4Mask = false;
    8658             ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
    8659         }
    8660     }
    8661     SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ,           VMX_EXIT_MOV_CRX);
    8662     SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE,          VMX_EXIT_MOV_CRX);
    8663 
    8664     if (   IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
    8665         || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
    8666     {
    8667         /** @todo later, need to fix handler as it assumes this won't usually happen. */
    8668         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
    8669     }
    8670     SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ,           VMX_EXIT_MOV_DRX);
    8671     SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE,          VMX_EXIT_MOV_DRX);
    8672 
    8673     SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR,              VMX_EXIT_RDMSR,    VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
    8674     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR,              VMX_EXIT_RDMSR);
    8675     SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR,              VMX_EXIT_WRMSR,    VMX_PROC_CTLS_USE_MSR_BITMAPS);
    8676     SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR,              VMX_EXIT_WRMSR);
    8677     SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT,              VMX_EXIT_MWAIT,    VMX_PROC_CTLS_MWAIT_EXIT);   /* paranoia */
    8678     SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT,              VMX_EXIT_MWAIT);
    8679     SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR,            VMX_EXIT_MONITOR,  VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
    8680     SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR,            VMX_EXIT_MONITOR);
    8681 #if 0 /** @todo too slow, fix handler. */
    8682     SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE,              VMX_EXIT_PAUSE,    VMX_PROC_CTLS_PAUSE_EXIT);
    8683 #endif
    8684     SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE,              VMX_EXIT_PAUSE);
    8685 
    8686     if (   IS_EITHER_ENABLED(pVM, INSTR_SGDT)
    8687         || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
    8688         || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
    8689         || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
    8690     {
    8691         pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
    8692         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
    8693     }
    8694     SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT,               VMX_EXIT_GDTR_IDTR_ACCESS);
    8695     SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT,               VMX_EXIT_GDTR_IDTR_ACCESS);
    8696     SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT,               VMX_EXIT_GDTR_IDTR_ACCESS);
    8697     SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT,               VMX_EXIT_GDTR_IDTR_ACCESS);
    8698 
    8699     if (   IS_EITHER_ENABLED(pVM, INSTR_SLDT)
    8700         || IS_EITHER_ENABLED(pVM, INSTR_STR)
    8701         || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
    8702         || IS_EITHER_ENABLED(pVM, INSTR_LTR))
    8703     {
    8704         pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
    8705         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
    8706     }
    8707     SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT,               VMX_EXIT_LDTR_TR_ACCESS);
    8708     SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR,                VMX_EXIT_LDTR_TR_ACCESS);
    8709     SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT,               VMX_EXIT_LDTR_TR_ACCESS);
    8710     SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR,                VMX_EXIT_LDTR_TR_ACCESS);
    8711 
    8712     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT,         VMX_EXIT_INVEPT);        /* unconditional */
    8713     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT,         VMX_EXIT_INVEPT);
    8714     SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP,             VMX_EXIT_RDTSCP,   VMX_PROC_CTLS_RDTSC_EXIT);
    8715     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP,             VMX_EXIT_RDTSCP);
    8716     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID,        VMX_EXIT_INVVPID);       /* unconditional */
    8717     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID,        VMX_EXIT_INVVPID);
    8718     SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD,             VMX_EXIT_WBINVD,   VMX_PROC_CTLS2_WBINVD_EXIT);
    8719     SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD,             VMX_EXIT_WBINVD);
    8720     SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV,             VMX_EXIT_XSETBV);        /* unconditional */
    8721     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV,             VMX_EXIT_XSETBV);
    8722     SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND,             VMX_EXIT_RDRAND,   VMX_PROC_CTLS2_RDRAND_EXIT);
    8723     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND,             VMX_EXIT_RDRAND);
    8724     SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID,        VMX_EXIT_INVPCID,  VMX_PROC_CTLS_INVLPG_EXIT);
    8725     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID,        VMX_EXIT_INVPCID);
    8726     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC,         VMX_EXIT_VMFUNC);        /* unconditional for the current setup */
    8727     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC,         VMX_EXIT_VMFUNC);
    8728     SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED,             VMX_EXIT_RDSEED,   VMX_PROC_CTLS2_RDSEED_EXIT);
    8729     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED,             VMX_EXIT_RDSEED);
    8730     SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES,             VMX_EXIT_XSAVES);        /* unconditional (enabled by host, guest cfg) */
    8731     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES,             VMX_EXIT_XSAVES);
    8732     SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS,            VMX_EXIT_XRSTORS);       /* unconditional (enabled by host, guest cfg) */
    8733     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS,            VMX_EXIT_XRSTORS);
    8734 
    8735 #undef IS_EITHER_ENABLED
    8736 #undef SET_ONLY_XBM_IF_EITHER_EN
    8737 #undef SET_CPE1_XBM_IF_EITHER_EN
    8738 #undef SET_CPEU_XBM_IF_EITHER_EN
    8739 #undef SET_CPE2_XBM_IF_EITHER_EN
    8740 
    8741     /*
    8742      * Sanitize the control stuff.
    8743      */
    8744     pDbgState->fCpe2Extra       &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
    8745     if (pDbgState->fCpe2Extra)
    8746         pDbgState->fCpe1Extra   |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
    8747     pDbgState->fCpe1Extra       &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
    8748     pDbgState->fCpe1Unwanted    &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
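
             /* The masking above follows the VMX capability MSR convention: allowed1
                has a bit set when the control may be 1, while allowed0 has a bit set
                when the control must be 1.  So ANDing fCpe1Extra/fCpe2Extra with
                allowed1 drops controls this CPU cannot enable, and ANDing
                fCpe1Unwanted with ~allowed0 avoids trying to clear controls the CPU
                forces on. */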
    8749     if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
    8750     {
    8751         pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
    8752         pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    8753     }
    8754 
    8755     Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
    8756           pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
    8757           pDbgState->fClearCr0Mask ? " clr-cr0" : "",
    8758           pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
    8759 }
    8760 
    8761 
    8762 /**
    8763  * Fires off DBGF events and dtrace probes for a VM-exit, when it's
    8764  * appropriate.
    8765  *
    8766  * The caller has checked the VM-exit against the
    8767  * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has already checked for NMIs,
    8768  * so we don't have to do either here.
    8769  *
    8770  * @returns Strict VBox status code (i.e. informational status codes too).
    8771  * @param   pVCpu           The cross context virtual CPU structure.
    8772  * @param   pVmxTransient   The VMX-transient structure.
    8773  * @param   uExitReason     The VM-exit reason.
    8774  *
    8775  * @remarks The name of this function is displayed by dtrace, so keep it short
    8776  *          and to the point. No longer than 33 chars, please.
    8777  */
    8778 static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
    8779 {
    8780     /*
    8781      * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
    8782      * same time check whether any corresponding Dtrace event is enabled (fDtrace).
    8783      *
    8784      * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
    8785  *       does.  Must add/change/remove in both places.  Same ordering, please.
    8786      *
    8787      *       Added/removed events must also be reflected in the next section
    8788      *       where we dispatch dtrace events.
    8789      */
    8790     bool            fDtrace1   = false;
    8791     bool            fDtrace2   = false;
    8792     DBGFEVENTTYPE   enmEvent1  = DBGFEVENT_END;
    8793     DBGFEVENTTYPE   enmEvent2  = DBGFEVENT_END;
    8794     uint32_t        uEventArg  = 0;
    8795 #define SET_EXIT(a_EventSubName) \
    8796         do { \
    8797             enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_,  a_EventSubName); \
    8798             fDtrace2  = RT_CONCAT3(VBOXVMM_EXIT_,   a_EventSubName, _ENABLED)(); \
    8799         } while (0)
    8800 #define SET_BOTH(a_EventSubName) \
    8801         do { \
    8802             enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
    8803             enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_,  a_EventSubName); \
    8804             fDtrace1  = RT_CONCAT3(VBOXVMM_INSTR_,  a_EventSubName, _ENABLED)(); \
    8805             fDtrace2  = RT_CONCAT3(VBOXVMM_EXIT_,   a_EventSubName, _ENABLED)(); \
    8806         } while (0)
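
             /* Illustrative expansion: SET_BOTH(CPUID) in the switch below becomes:

                    enmEvent1 = DBGFEVENT_INSTR_CPUID;
                    enmEvent2 = DBGFEVENT_EXIT_CPUID;
                    fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
                    fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
             */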
    8807     switch (uExitReason)
    8808     {
    8809         case VMX_EXIT_MTF:
    8810             return vmxHCExitMtf(pVCpu, pVmxTransient);
    8811 
    8812         case VMX_EXIT_XCPT_OR_NMI:
    8813         {
    8814             uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    8815             switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
    8816             {
    8817                 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
    8818                 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
    8819                 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
    8820                     if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
    8821                     {
    8822                         if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
    8823                         {
    8824                             vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
    8825                             uEventArg = pVmxTransient->uExitIntErrorCode;
    8826                         }
    8827                         enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
    8828                         switch (enmEvent1)
    8829                         {
    8830                             case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
    8831                             case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
    8832                             case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
    8833                             case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
    8834                             case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
    8835                             case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
    8836                             case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
    8837                             case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
    8838                             case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
    8839                             case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
    8840                             case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
    8841                             case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
    8842                             case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
    8843                             case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
    8844                             case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
    8845                             case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
    8846                             case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
    8847                             case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
    8848                             default:                                                      break;
    8849                         }
    8850                     }
    8851                     else
    8852                         AssertFailed();
    8853                     break;
    8854 
    8855                 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
    8856                     uEventArg = idxVector;
    8857                     enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
    8858                     fDtrace1  = VBOXVMM_INT_SOFTWARE_ENABLED();
    8859                     break;
    8860             }
    8861             break;
    8862         }
    8863 
    8864         case VMX_EXIT_TRIPLE_FAULT:
    8865             enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
    8866             //fDtrace1  = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
    8867             break;
    8868         case VMX_EXIT_TASK_SWITCH:      SET_EXIT(TASK_SWITCH); break;
    8869         case VMX_EXIT_EPT_VIOLATION:    SET_EXIT(VMX_EPT_VIOLATION); break;
    8870         case VMX_EXIT_EPT_MISCONFIG:    SET_EXIT(VMX_EPT_MISCONFIG); break;
    8871         case VMX_EXIT_APIC_ACCESS:      SET_EXIT(VMX_VAPIC_ACCESS); break;
    8872         case VMX_EXIT_APIC_WRITE:       SET_EXIT(VMX_VAPIC_WRITE); break;
    8873 
    8874         /* Instruction specific VM-exits: */
    8875         case VMX_EXIT_CPUID:            SET_BOTH(CPUID); break;
    8876         case VMX_EXIT_GETSEC:           SET_BOTH(GETSEC); break;
    8877         case VMX_EXIT_HLT:              SET_BOTH(HALT); break;
    8878         case VMX_EXIT_INVD:             SET_BOTH(INVD); break;
    8879         case VMX_EXIT_INVLPG:           SET_BOTH(INVLPG); break;
    8880         case VMX_EXIT_RDPMC:            SET_BOTH(RDPMC); break;
    8881         case VMX_EXIT_RDTSC:            SET_BOTH(RDTSC); break;
    8882         case VMX_EXIT_RSM:              SET_BOTH(RSM); break;
    8883         case VMX_EXIT_VMCALL:           SET_BOTH(VMM_CALL); break;
    8884         case VMX_EXIT_VMCLEAR:          SET_BOTH(VMX_VMCLEAR); break;
    8885         case VMX_EXIT_VMLAUNCH:         SET_BOTH(VMX_VMLAUNCH); break;
    8886         case VMX_EXIT_VMPTRLD:          SET_BOTH(VMX_VMPTRLD); break;
    8887         case VMX_EXIT_VMPTRST:          SET_BOTH(VMX_VMPTRST); break;
    8888         case VMX_EXIT_VMREAD:           SET_BOTH(VMX_VMREAD); break;
    8889         case VMX_EXIT_VMRESUME:         SET_BOTH(VMX_VMRESUME); break;
    8890         case VMX_EXIT_VMWRITE:          SET_BOTH(VMX_VMWRITE); break;
    8891         case VMX_EXIT_VMXOFF:           SET_BOTH(VMX_VMXOFF); break;
    8892         case VMX_EXIT_VMXON:            SET_BOTH(VMX_VMXON); break;
    8893         case VMX_EXIT_MOV_CRX:
    8894             vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    8895             if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
    8896                 SET_BOTH(CRX_READ);
    8897             else
    8898                 SET_BOTH(CRX_WRITE);
    8899             uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
    8900             break;
    8901         case VMX_EXIT_MOV_DRX:
    8902             vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    8903             if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
    8904                 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
    8905                 SET_BOTH(DRX_READ);
    8906             else
    8907                 SET_BOTH(DRX_WRITE);
    8908             uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
    8909             break;
    8910         case VMX_EXIT_RDMSR:            SET_BOTH(RDMSR); break;
    8911         case VMX_EXIT_WRMSR:            SET_BOTH(WRMSR); break;
    8912         case VMX_EXIT_MWAIT:            SET_BOTH(MWAIT); break;
    8913         case VMX_EXIT_MONITOR:          SET_BOTH(MONITOR); break;
    8914         case VMX_EXIT_PAUSE:            SET_BOTH(PAUSE); break;
    8915         case VMX_EXIT_GDTR_IDTR_ACCESS:
    8916             vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
    8917             switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
    8918             {
    8919                 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
    8920                 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
    8921                 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
    8922                 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
    8923             }
    8924             break;
    8925 
    8926         case VMX_EXIT_LDTR_TR_ACCESS:
    8927             vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
    8928             switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
    8929             {
    8930                 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
    8931                 case VMX_YYTR_INSINFO_II_STR:  SET_BOTH(STR); break;
    8932                 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
    8933                 case VMX_YYTR_INSINFO_II_LTR:  SET_BOTH(LTR); break;
    8934             }
    8935             break;
    8936 
    8937         case VMX_EXIT_INVEPT:           SET_BOTH(VMX_INVEPT); break;
    8938         case VMX_EXIT_RDTSCP:           SET_BOTH(RDTSCP); break;
    8939         case VMX_EXIT_INVVPID:          SET_BOTH(VMX_INVVPID); break;
    8940         case VMX_EXIT_WBINVD:           SET_BOTH(WBINVD); break;
    8941         case VMX_EXIT_XSETBV:           SET_BOTH(XSETBV); break;
    8942         case VMX_EXIT_RDRAND:           SET_BOTH(RDRAND); break;
    8943         case VMX_EXIT_INVPCID:          SET_BOTH(VMX_INVPCID); break;
    8944         case VMX_EXIT_VMFUNC:           SET_BOTH(VMX_VMFUNC); break;
    8945         case VMX_EXIT_RDSEED:           SET_BOTH(RDSEED); break;
    8946         case VMX_EXIT_XSAVES:           SET_BOTH(XSAVES); break;
    8947         case VMX_EXIT_XRSTORS:          SET_BOTH(XRSTORS); break;
    8948 
    8949         /* Events that aren't relevant at this point. */
    8950         case VMX_EXIT_EXT_INT:
    8951         case VMX_EXIT_INT_WINDOW:
    8952         case VMX_EXIT_NMI_WINDOW:
    8953         case VMX_EXIT_TPR_BELOW_THRESHOLD:
    8954         case VMX_EXIT_PREEMPT_TIMER:
    8955         case VMX_EXIT_IO_INSTR:
    8956             break;
    8957 
    8958         /* Errors and unexpected events. */
    8959         case VMX_EXIT_INIT_SIGNAL:
    8960         case VMX_EXIT_SIPI:
    8961         case VMX_EXIT_IO_SMI:
    8962         case VMX_EXIT_SMI:
    8963         case VMX_EXIT_ERR_INVALID_GUEST_STATE:
    8964         case VMX_EXIT_ERR_MSR_LOAD:
    8965         case VMX_EXIT_ERR_MACHINE_CHECK:
    8966         case VMX_EXIT_PML_FULL:
    8967         case VMX_EXIT_VIRTUALIZED_EOI:
    8968             break;
    8969 
    8970         default:
    8971             AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
    8972             break;
    8973     }
    8974 #undef SET_BOTH
    8975 #undef SET_EXIT
    8976 
    8977     /*
    8978      * Dtrace tracepoints go first.  We do them all here at once so we don't
    8979      * have to duplicate the guest-state saving code a few dozen times.
    8980      * The downside is that we've got to repeat the switch, though this time
    8981      * we use enmEvent since the probes are a subset of what DBGF does.
    8982      */
    8983     if (fDtrace1 || fDtrace2)
    8984     {
    8985         vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    8986         vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    8987         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    8988         switch (enmEvent1)
    8989         {
    8990             /** @todo consider which extra parameters would be helpful for each probe.   */
    8991             case DBGFEVENT_END: break;
    8992             case DBGFEVENT_XCPT_DE:                 VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
    8993             case DBGFEVENT_XCPT_DB:                 VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
    8994             case DBGFEVENT_XCPT_BP:                 VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
    8995             case DBGFEVENT_XCPT_OF:                 VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
    8996             case DBGFEVENT_XCPT_BR:                 VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
    8997             case DBGFEVENT_XCPT_UD:                 VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
    8998             case DBGFEVENT_XCPT_NM:                 VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
    8999             case DBGFEVENT_XCPT_DF:                 VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
    9000             case DBGFEVENT_XCPT_TS:                 VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
    9001             case DBGFEVENT_XCPT_NP:                 VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
    9002             case DBGFEVENT_XCPT_SS:                 VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
    9003             case DBGFEVENT_XCPT_GP:                 VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
    9004             case DBGFEVENT_XCPT_PF:                 VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
    9005             case DBGFEVENT_XCPT_MF:                 VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
    9006             case DBGFEVENT_XCPT_AC:                 VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
    9007             case DBGFEVENT_XCPT_XF:                 VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
    9008             case DBGFEVENT_XCPT_VE:                 VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
    9009             case DBGFEVENT_XCPT_SX:                 VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
    9010             case DBGFEVENT_INTERRUPT_SOFTWARE:      VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9011             case DBGFEVENT_INSTR_CPUID:             VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
    9012             case DBGFEVENT_INSTR_GETSEC:            VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
    9013             case DBGFEVENT_INSTR_HALT:              VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
    9014             case DBGFEVENT_INSTR_INVD:              VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
    9015             case DBGFEVENT_INSTR_INVLPG:            VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
    9016             case DBGFEVENT_INSTR_RDPMC:             VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
    9017             case DBGFEVENT_INSTR_RDTSC:             VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
    9018             case DBGFEVENT_INSTR_RSM:               VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
    9019             case DBGFEVENT_INSTR_CRX_READ:          VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9020             case DBGFEVENT_INSTR_CRX_WRITE:         VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9021             case DBGFEVENT_INSTR_DRX_READ:          VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9022             case DBGFEVENT_INSTR_DRX_WRITE:         VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9023             case DBGFEVENT_INSTR_RDMSR:             VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
    9024             case DBGFEVENT_INSTR_WRMSR:             VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
    9025                                                                         RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
    9026             case DBGFEVENT_INSTR_MWAIT:             VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
    9027             case DBGFEVENT_INSTR_MONITOR:           VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
    9028             case DBGFEVENT_INSTR_PAUSE:             VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
    9029             case DBGFEVENT_INSTR_SGDT:              VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
    9030             case DBGFEVENT_INSTR_SIDT:              VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
    9031             case DBGFEVENT_INSTR_LGDT:              VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
    9032             case DBGFEVENT_INSTR_LIDT:              VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
    9033             case DBGFEVENT_INSTR_SLDT:              VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
    9034             case DBGFEVENT_INSTR_STR:               VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
    9035             case DBGFEVENT_INSTR_LLDT:              VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
    9036             case DBGFEVENT_INSTR_LTR:               VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
    9037             case DBGFEVENT_INSTR_RDTSCP:            VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
    9038             case DBGFEVENT_INSTR_WBINVD:            VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
    9039             case DBGFEVENT_INSTR_XSETBV:            VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
    9040             case DBGFEVENT_INSTR_RDRAND:            VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
    9041             case DBGFEVENT_INSTR_RDSEED:            VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
    9042             case DBGFEVENT_INSTR_XSAVES:            VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
    9043             case DBGFEVENT_INSTR_XRSTORS:           VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
    9044             case DBGFEVENT_INSTR_VMM_CALL:          VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
    9045             case DBGFEVENT_INSTR_VMX_VMCLEAR:       VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
    9046             case DBGFEVENT_INSTR_VMX_VMLAUNCH:      VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
    9047             case DBGFEVENT_INSTR_VMX_VMPTRLD:       VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
    9048             case DBGFEVENT_INSTR_VMX_VMPTRST:       VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
    9049             case DBGFEVENT_INSTR_VMX_VMREAD:        VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
    9050             case DBGFEVENT_INSTR_VMX_VMRESUME:      VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
    9051             case DBGFEVENT_INSTR_VMX_VMWRITE:       VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
    9052             case DBGFEVENT_INSTR_VMX_VMXOFF:        VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
    9053             case DBGFEVENT_INSTR_VMX_VMXON:         VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
    9054             case DBGFEVENT_INSTR_VMX_INVEPT:        VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
    9055             case DBGFEVENT_INSTR_VMX_INVVPID:       VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
    9056             case DBGFEVENT_INSTR_VMX_INVPCID:       VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
    9057             case DBGFEVENT_INSTR_VMX_VMFUNC:        VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
    9058             default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
    9059         }
    9060         switch (enmEvent2)
    9061         {
    9062             /** @todo consider which extra parameters would be helpful for each probe. */
    9063             case DBGFEVENT_END: break;
    9064             case DBGFEVENT_EXIT_TASK_SWITCH:        VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
    9065             case DBGFEVENT_EXIT_CPUID:              VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
    9066             case DBGFEVENT_EXIT_GETSEC:             VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
    9067             case DBGFEVENT_EXIT_HALT:               VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
    9068             case DBGFEVENT_EXIT_INVD:               VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
    9069             case DBGFEVENT_EXIT_INVLPG:             VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
    9070             case DBGFEVENT_EXIT_RDPMC:              VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
    9071             case DBGFEVENT_EXIT_RDTSC:              VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
    9072             case DBGFEVENT_EXIT_RSM:                VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
    9073             case DBGFEVENT_EXIT_CRX_READ:           VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9074             case DBGFEVENT_EXIT_CRX_WRITE:          VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9075             case DBGFEVENT_EXIT_DRX_READ:           VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9076             case DBGFEVENT_EXIT_DRX_WRITE:          VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9077             case DBGFEVENT_EXIT_RDMSR:              VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
    9078             case DBGFEVENT_EXIT_WRMSR:              VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
    9079                                                                        RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
    9080             case DBGFEVENT_EXIT_MWAIT:              VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
    9081             case DBGFEVENT_EXIT_MONITOR:            VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
    9082             case DBGFEVENT_EXIT_PAUSE:              VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
    9083             case DBGFEVENT_EXIT_SGDT:               VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
    9084             case DBGFEVENT_EXIT_SIDT:               VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
    9085             case DBGFEVENT_EXIT_LGDT:               VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
    9086             case DBGFEVENT_EXIT_LIDT:               VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
    9087             case DBGFEVENT_EXIT_SLDT:               VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
    9088             case DBGFEVENT_EXIT_STR:                VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
    9089             case DBGFEVENT_EXIT_LLDT:               VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
    9090             case DBGFEVENT_EXIT_LTR:                VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
    9091             case DBGFEVENT_EXIT_RDTSCP:             VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
    9092             case DBGFEVENT_EXIT_WBINVD:             VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
    9093             case DBGFEVENT_EXIT_XSETBV:             VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
    9094             case DBGFEVENT_EXIT_RDRAND:             VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
    9095             case DBGFEVENT_EXIT_RDSEED:             VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
    9096             case DBGFEVENT_EXIT_XSAVES:             VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
    9097             case DBGFEVENT_EXIT_XRSTORS:            VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
    9098             case DBGFEVENT_EXIT_VMM_CALL:           VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
    9099             case DBGFEVENT_EXIT_VMX_VMCLEAR:        VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
    9100             case DBGFEVENT_EXIT_VMX_VMLAUNCH:       VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
    9101             case DBGFEVENT_EXIT_VMX_VMPTRLD:        VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
    9102             case DBGFEVENT_EXIT_VMX_VMPTRST:        VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
    9103             case DBGFEVENT_EXIT_VMX_VMREAD:         VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
    9104             case DBGFEVENT_EXIT_VMX_VMRESUME:       VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
    9105             case DBGFEVENT_EXIT_VMX_VMWRITE:        VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
    9106             case DBGFEVENT_EXIT_VMX_VMXOFF:         VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
    9107             case DBGFEVENT_EXIT_VMX_VMXON:          VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
    9108             case DBGFEVENT_EXIT_VMX_INVEPT:         VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
    9109             case DBGFEVENT_EXIT_VMX_INVVPID:        VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
    9110             case DBGFEVENT_EXIT_VMX_INVPCID:        VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
    9111             case DBGFEVENT_EXIT_VMX_VMFUNC:         VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
    9112             case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG:  VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
    9113             case DBGFEVENT_EXIT_VMX_EPT_VIOLATION:  VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
    9114             case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS:   VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
    9115             case DBGFEVENT_EXIT_VMX_VAPIC_WRITE:    VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
    9116             default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
    9117         }
    9118     }
    9119 
    9120     /*
    9121      * Fire off the DBGF event, if enabled (our check here is just a quick one,
    9122      * the DBGF call will do a full check).
    9123      *
    9124      * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
    9125      * Note! If we have two events, we prioritize the first, i.e. the instruction
    9126      *       one, in order to avoid event nesting.
    9127      */
    9128     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    9129     if (   enmEvent1 != DBGFEVENT_END
    9130         && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
    9131     {
    9132         vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    9133         VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
    9134         if (rcStrict != VINF_SUCCESS)
    9135             return rcStrict;
    9136     }
    9137     else if (   enmEvent2 != DBGFEVENT_END
    9138              && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
    9139     {
    9140         vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    9141         VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
    9142         if (rcStrict != VINF_SUCCESS)
    9143             return rcStrict;
    9144     }
    9145 
    9146     return VINF_SUCCESS;
    9147 }
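
         /* Callers are expected to gate this function on the bitmap first, as
            vmxHCRunDebugHandleExit below does:

                if (   uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
                    && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason))
                    rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
         */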
    9148 
    9149 
    9150 /**
    9151  * Single-stepping VM-exit filtering.
    9152  *
    9153  * This is preprocessing the VM-exits and deciding whether we've gotten far
    9154  * enough to return VINF_EM_DBG_STEPPED already.  If not, normal VM-exit
    9155  * handling is performed.
    9156  *
    9157  * @returns Strict VBox status code (i.e. informational status codes too).
    9158  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    9159  * @param   pVmxTransient   The VMX-transient structure.
    9160  * @param   pDbgState       The debug state.
    9161  */
    9162 DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    9163 {
    9164     /*
    9165      * Expensive (saves context) generic dtrace VM-exit probe.
    9166      */
    9167     uint32_t const uExitReason = pVmxTransient->uExitReason;
    9168     if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
    9169     { /* more likely */ }
    9170     else
    9171     {
    9172         vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    9173         int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    9174         AssertRC(rc);
    9175         VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
    9176     }
    9177 
    9178 #ifdef IN_RING0 /* NMIs should never reach R3. */
    9179     /*
    9180      * Check for host NMI, just to get that out of the way.
    9181      */
    9182     if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
    9183     { /* normally likely */ }
    9184     else
    9185     {
    9186         vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
    9187         uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
    9188         if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
    9189             return vmxHCExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
    9190     }
    9191 #endif
    9192 
    9193     /*
    9194      * Check for single stepping event if we're stepping.
    9195      */
    9196     if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    9197     {
    9198         switch (uExitReason)
    9199         {
    9200             case VMX_EXIT_MTF:
    9201                 return vmxHCExitMtf(pVCpu, pVmxTransient);
    9202 
    9203             /* Various events: */
    9204             case VMX_EXIT_XCPT_OR_NMI:
    9205             case VMX_EXIT_EXT_INT:
    9206             case VMX_EXIT_TRIPLE_FAULT:
    9207             case VMX_EXIT_INT_WINDOW:
    9208             case VMX_EXIT_NMI_WINDOW:
    9209             case VMX_EXIT_TASK_SWITCH:
    9210             case VMX_EXIT_TPR_BELOW_THRESHOLD:
    9211             case VMX_EXIT_APIC_ACCESS:
    9212             case VMX_EXIT_EPT_VIOLATION:
    9213             case VMX_EXIT_EPT_MISCONFIG:
    9214             case VMX_EXIT_PREEMPT_TIMER:
    9215 
    9216             /* Instruction specific VM-exits: */
    9217             case VMX_EXIT_CPUID:
    9218             case VMX_EXIT_GETSEC:
    9219             case VMX_EXIT_HLT:
    9220             case VMX_EXIT_INVD:
    9221             case VMX_EXIT_INVLPG:
    9222             case VMX_EXIT_RDPMC:
    9223             case VMX_EXIT_RDTSC:
    9224             case VMX_EXIT_RSM:
    9225             case VMX_EXIT_VMCALL:
    9226             case VMX_EXIT_VMCLEAR:
    9227             case VMX_EXIT_VMLAUNCH:
    9228             case VMX_EXIT_VMPTRLD:
    9229             case VMX_EXIT_VMPTRST:
    9230             case VMX_EXIT_VMREAD:
    9231             case VMX_EXIT_VMRESUME:
    9232             case VMX_EXIT_VMWRITE:
    9233             case VMX_EXIT_VMXOFF:
    9234             case VMX_EXIT_VMXON:
    9235             case VMX_EXIT_MOV_CRX:
    9236             case VMX_EXIT_MOV_DRX:
    9237             case VMX_EXIT_IO_INSTR:
    9238             case VMX_EXIT_RDMSR:
    9239             case VMX_EXIT_WRMSR:
    9240             case VMX_EXIT_MWAIT:
    9241             case VMX_EXIT_MONITOR:
    9242             case VMX_EXIT_PAUSE:
    9243             case VMX_EXIT_GDTR_IDTR_ACCESS:
    9244             case VMX_EXIT_LDTR_TR_ACCESS:
    9245             case VMX_EXIT_INVEPT:
    9246             case VMX_EXIT_RDTSCP:
    9247             case VMX_EXIT_INVVPID:
    9248             case VMX_EXIT_WBINVD:
    9249             case VMX_EXIT_XSETBV:
    9250             case VMX_EXIT_RDRAND:
    9251             case VMX_EXIT_INVPCID:
    9252             case VMX_EXIT_VMFUNC:
    9253             case VMX_EXIT_RDSEED:
    9254             case VMX_EXIT_XSAVES:
    9255             case VMX_EXIT_XRSTORS:
    9256             {
    9257                 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    9258                 AssertRCReturn(rc, rc);
    9259                 if (   pVCpu->cpum.GstCtx.rip    != pDbgState->uRipStart
    9260                     || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
    9261                     return VINF_EM_DBG_STEPPED;
    9262                 break;
    9263             }
    9264 
    9265             /* Errors and unexpected events: */
    9266             case VMX_EXIT_INIT_SIGNAL:
    9267             case VMX_EXIT_SIPI:
    9268             case VMX_EXIT_IO_SMI:
    9269             case VMX_EXIT_SMI:
    9270             case VMX_EXIT_ERR_INVALID_GUEST_STATE:
    9271             case VMX_EXIT_ERR_MSR_LOAD:
    9272             case VMX_EXIT_ERR_MACHINE_CHECK:
    9273             case VMX_EXIT_PML_FULL:
    9274             case VMX_EXIT_VIRTUALIZED_EOI:
    9275             case VMX_EXIT_APIC_WRITE:  /* Some talk about this being fault-like, so I guess we must process it? */
    9276                 break;
    9277 
    9278             default:
    9279                 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
    9280                 break;
    9281         }
    9282     }
    9283 
    9284     /*
    9285      * Check for debugger event breakpoints and DTrace probes.
    9286      */
    9287     if (   uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
    9288         && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
    9289     {
    9290         VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
    9291         if (rcStrict != VINF_SUCCESS)
    9292             return rcStrict;
    9293     }
    9294 
    9295     /*
    9296      * Normal processing.
    9297      */
    9298 #ifdef HMVMX_USE_FUNCTION_TABLE
    9299     return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
    9300 #else
    9301     return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
    9302 #endif
    9303 }
    9304 
    9305 
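The stepping checks here and in the debug loop below reduce to a single comparison: a step has completed once the guest's CS:RIP differs from the values recorded when stepping was armed. A minimal standalone sketch of that idea follows; the type and function names are illustrative, not from this file:

    #include <stdbool.h>
    #include <stdint.h>

    /* State captured when single-stepping is armed. */
    typedef struct DBGSTEPSTATE
    {
        uint64_t uRipStart;  /* guest RIP at the start of the step */
        uint16_t uCsStart;   /* guest CS selector at the start of the step */
    } DBGSTEPSTATE;

    /* A step has completed iff CS or RIP moved; comparing CS as well catches
       far transfers that happen to land on the same offset. */
    static bool dbgHasStepped(DBGSTEPSTATE const *pState, uint64_t uRipNow, uint16_t uCsNow)
    {
        return uRipNow != pState->uRipStart
            || uCsNow  != pState->uCsStart;
    }
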
    9306 /**
    9307  * Single steps guest code using hardware-assisted VMX.
    9308  *
    9309  * This is -not- the same as the guest single-stepping itself (say, using EFLAGS.TF),
    9310  * but rather single-stepping through the hypervisor debugger.
    9311  *
    9312  * @returns Strict VBox status code (i.e. informational status codes too).
    9313  * @param   pVCpu       The cross context virtual CPU structure.
    9314  * @param   pcLoops     Pointer to the number of executed loops.
    9315  *
    9316  * @note    Mostly the same as vmxHCRunGuestCodeNormal().
    9317  */
    9318 static VBOXSTRICTRC vmxHCRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops)
    9319 {
    9320     uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
    9321     Assert(pcLoops);
    9322     Assert(*pcLoops <= cMaxResumeLoops);
    9323 
    9324     VMXTRANSIENT VmxTransient;
    9325     RT_ZERO(VmxTransient);
    9326     VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    9327 
    9328     /* Set HMCPU indicators.  */
    9329     bool const fSavedSingleInstruction = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
    9330     VCPU_2_VMXSTATE(pVCpu).fSingleInstruction     = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction || DBGFIsStepping(pVCpu);
    9331     pVCpu->hmr0.s.fDebugWantRdTscExit    = false;
    9332     pVCpu->hmr0.s.fUsingDebugLoop        = true;
    9333 
    9334     /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps.  */
    9335     VMXRUNDBGSTATE DbgState;
    9336     vmxHCRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);
    9337     vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
    9338 
    9339     /*
    9340      * The loop.
    9341      */
    9342     VBOXSTRICTRC rcStrict  = VERR_INTERNAL_ERROR_5;
    9343     for (;;)
    9344     {
    9345         Assert(!HMR0SuspendPending());
    9346         HMVMX_ASSERT_CPU_SAFE(pVCpu);
    9347         STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    9348         bool fStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
    9349 
    9350         /* Set up VM-execution controls the next two can respond to. */
    9351         vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
    9352 
    9353         /*
    9354          * Preparatory work for running guest code, this may force us to
    9355          * return to ring-3.
    9356          *
    9357          * Warning! This bugger disables interrupts on VINF_SUCCESS!
    9358          */
    9359         rcStrict = vmxHCPreRunGuest(pVCpu, &VmxTransient, fStepping);
    9360         if (rcStrict != VINF_SUCCESS)
    9361             break;
    9362 
    9363         /* Interrupts are disabled at this point! */
    9364         vmxHCPreRunGuestCommitted(pVCpu, &VmxTransient);
    9365 
    9366         /* Override any obnoxious code in the above two calls. */
    9367         vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
    9368 
    9369         /*
    9370          * Finally execute the guest.
    9371          */
    9372         int rcRun = vmxHCRunGuest(pVCpu, &VmxTransient);
    9373 
    9374         vmxHCPostRunGuest(pVCpu, &VmxTransient, rcRun);
    9375         /* Interrupts are re-enabled at this point! */
    9376 
    9377         /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
    9378         if (RT_SUCCESS(rcRun))
    9379         { /* very likely */ }
    9380         else
    9381         {
    9382             STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, x);
    9383             vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    9384             return rcRun;
    9385         }
    9386 
    9387         /* Profile the VM-exit. */
    9388         AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    9389         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitAll);
    9390         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    9391         STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, &VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    9392         HMVMX_START_EXIT_DISPATCH_PROF();
    9393 
    9394         VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
    9395 
    9396         /*
    9397          * Handle the VM-exit - we quit earlier on certain VM-exits, see vmxHCRunDebugHandleExit().
    9398          */
    9399         rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
    9400         STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    9401         if (rcStrict != VINF_SUCCESS)
    9402             break;
    9403         if (++(*pcLoops) > cMaxResumeLoops)
    9404         {
    9405             STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchMaxResumeLoops);
    9406             rcStrict = VINF_EM_RAW_INTERRUPT;
    9407             break;
    9408         }
    9409 
    9410         /*
    9411          * Stepping: if the RIP changed, consider it a single step.
    9412          * Otherwise, make sure one of the TFs gets set.
    9413          */
    9414         if (fStepping)
    9415         {
    9416             int rc = vmxHCImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    9417             AssertRC(rc);
    9418             if (   pVCpu->cpum.GstCtx.rip    != DbgState.uRipStart
    9419                 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
    9420             {
    9421                 rcStrict = VINF_EM_DBG_STEPPED;
    9422                 break;
    9423             }
    9424             ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
    9425         }
    9426 
    9427         /*
    9428          * Update when DTrace settings change (DBGF kicks us, so no need to check).
    9429          */
    9430         if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
    9431             vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
    9432 
    9433         /* Restore all controls applied by vmxHCPreRunGuestDebugStateApply above. */
    9434         rcStrict = vmxHCRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
    9435         Assert(rcStrict == VINF_SUCCESS);
    9436     }
    9437 
    9438     /*
    9439      * Clear the X86_EFL_TF if necessary.
    9440      */
    9441     if (pVCpu->hmr0.s.fClearTrapFlag)
    9442     {
    9443         int rc = vmxHCImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
    9444         AssertRC(rc);
    9445         pVCpu->hmr0.s.fClearTrapFlag = false;
    9446         pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
    9447     }
    9448     /** @todo there seem to be issues with the resume flag when the monitor trap
    9449      *        flag is pending without being used. Seen early in BIOS init when
    9450      *        accessing the APIC page in protected mode. */
    9451 
    9452     /* Restore HMCPU indicators. */
    9453     pVCpu->hmr0.s.fUsingDebugLoop     = false;
    9454     pVCpu->hmr0.s.fDebugWantRdTscExit = false;
    9455     VCPU_2_VMXSTATE(pVCpu).fSingleInstruction  = fSavedSingleInstruction;
    9456 
    9457     STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    9458     return rcStrict;
    9459 }
    9460 #endif
    9461 
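The loop above follows the usual inner-loop shape: prepare, run the guest, handle the exit, and bail out once the resume budget is spent so the caller regains control (the VINF_EM_RAW_INTERRUPT path). A simplified, self-contained sketch of that control flow; the status codes and helper are stand-ins, not VirtualBox APIs:

    #include <stdint.h>

    enum { STATUS_CONTINUE = 0, STATUS_RAW_INTERRUPT = 1 };

    /* Stand-in for one run-and-dispatch iteration; a real body would
       VMLAUNCH/VMRESUME and handle the resulting VM-exit. */
    static int runAndHandleExitOnce(void)
    {
        return STATUS_CONTINUE;
    }

    /* Re-enter the guest until an exit needs outer-loop attention or the
       iteration budget runs out. */
    static int debugLoopSketch(uint32_t *pcLoops, uint32_t cMaxResumeLoops)
    {
        for (;;)
        {
            int rc = runAndHandleExitOnce();
            if (rc != STATUS_CONTINUE)
                return rc;                   /* step detected, error, etc. */
            if (++(*pcLoops) > cMaxResumeLoops)
                return STATUS_RAW_INTERRUPT; /* hand control back to the caller */
        }
    }
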
    94625267/** @} */
    94635268
     
    97195524
    97205525#ifdef VBOX_STRICT
    9721 # ifdef IN_RING0
     5526# ifndef IN_NEM_DARWIN
    97225527/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
    97235528# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
     
    104706275            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
    104716276            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
    10472 #ifdef IN_RING0
     6277#ifndef IN_NEM_DARWIN
    104736278            Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
    104746279                      pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
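
A pattern that repeats through the remaining hunks: every "#ifdef IN_RING0" guard becomes "#ifndef IN_NEM_DARWIN". Assuming the template is compiled by exactly two consumers, ring-0 code and the ring-3 NEM backend on macOS, both spellings select the same builds, but the new one expresses the actual intent: everything except the Darwin NEM build. Schematically:

    /* Before: only when built into ring-0 code. */
    #ifdef IN_RING0
        /* host-context-only work, e.g. preemption-sensitive state */
    #endif

    /* After: everywhere except the macOS NEM (ring-3) build. */
    #ifndef IN_NEM_DARWIN
        /* host-context-only work, e.g. preemption-sensitive state */
    #endif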
     
    105086313    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    105096314
    10510 #ifdef IN_RING0
     6315#ifndef IN_NEM_DARWIN
    105116316    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    105126317    if (!VM_IS_VMX_NESTED_PAGING(pVM))
     
    105156320#endif
    105166321    {
    10517 #if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && defined(IN_RING0)
     6322#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
    105186323        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
    105196324#endif
     
    106916496        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    106926497        if (   !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
    10693 #ifdef IN_RING0
     6498#ifndef IN_NEM_DARWIN
    106946499            && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
    106956500#endif
     
    108166621         * See Intel spec. 27.1 "Architectural State before a VM-Exit".
    108176622         */
    10818 #ifdef IN_RING0
     6623#ifndef IN_NEM_DARWIN
    108196624        VMMRZCallRing3Disable(pVCpu);
    108206625        HM_DISABLE_PREEMPT(pVCpu);
     
    109516756    PCPUMCTX            pCtx            = &pVCpu->cpum.GstCtx;
    109526757    PVMXVMCSINFO        pVmcsInfo       = pVmxTransient->pVmcsInfo;
    10953 #ifdef IN_RING0
     6758#ifndef IN_NEM_DARWIN
    109546759    PVMXVMCSINFOSHARED  pVmcsInfoShared = pVmcsInfo->pShared;
    109556760    if (pVmcsInfoShared->RealMode.fRealOnV86Active)
     
    109596764    {
    109606765#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    10961 # ifdef IN_RING0
     6766# ifndef IN_NEM_DARWIN
    109626767        Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
    109636768# else
     
    109846789    }
    109856790
    10986 #ifdef IN_RING0
     6791#ifndef IN_NEM_DARWIN
    109876792    Assert(CPUMIsGuestInRealModeEx(pCtx));
    109886793    Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
     
    110406845
    110416846#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    11042 # ifdef IN_RING0
     6847# ifndef IN_NEM_DARWIN
    110436848    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    110446849    AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
     
    111676972    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
    111686973
    11169 #ifdef IN_RING0
     6974#ifndef IN_NEM_DARWIN
    111706975    /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
    111716976    if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
     
    112017006    switch (uExitIntType)
    112027007    {
    11203 #ifdef IN_RING0 /* NMIs should never reach R3. */
     7008#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
    112047009        /*
    112057010         * Host physical NMIs:
     
    112137018        case VMX_EXIT_INT_INFO_TYPE_NMI:
    112147019        {
    11215             rcStrict = vmxHCExitHostNmi(pVCpu, pVmcsInfo);
     7020            rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
    112167021            break;
    112177022        }
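
For context on the uExitIntType switch above: the VM-exit interruption-information field packs vector, type, and a valid bit into one 32-bit value (Intel SDM Vol. 3, VM-Exit Interruption-Information Field; type 2 is an NMI). A small freestanding decoder with illustrative helper names:

    #include <stdint.h>
    #include <stdio.h>

    /* Layout: bits 7:0 vector, bits 10:8 type (2 = NMI), bit 31 valid. */
    static uint8_t exitIntInfoVector(uint32_t uInfo)  { return (uint8_t)(uInfo & 0xff); }
    static uint8_t exitIntInfoType(uint32_t uInfo)    { return (uint8_t)((uInfo >> 8) & 0x7); }
    static int     exitIntInfoIsValid(uint32_t uInfo) { return (int)((uInfo >> 31) & 1); }

    int main(void)
    {
        uint32_t const uInfo = UINT32_C(0x80000202); /* valid, type NMI, vector 2 */
        if (exitIntInfoIsValid(uInfo) && exitIntInfoType(uInfo) == 2 /* NMI */)
            printf("host NMI, vector %u\n", (unsigned)exitIntInfoVector(uInfo));
        return 0;
    }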
     
    115487353{
    115497354    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11550 #ifdef IN_RING0
     7355#ifndef IN_NEM_DARWIN
    115517356    Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
    115527357#endif
     
    117037508                                                                                : HM_CHANGED_RAISED_XCPT_MASK);
    117047509
    11705 #ifdef IN_RING0
     7510#ifndef IN_NEM_DARWIN
    117067511    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    117077512    bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     
    117097514    {
    117107515        pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    11711         vmxHCUpdateStartVmFunction(pVCpu);
     7516        hmR0VmxUpdateStartVmFunction(pVCpu);
    117127517    }
    117137518#endif
     
    118017606    rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val);             AssertRC(rc);
    118027607    Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW              %#RX64\n", u64Val));
    11803 # ifdef IN_RING0
     7608# ifndef IN_NEM_DARWIN
    118047609    if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
    118057610    {
     
    119297734    Log4Func(("ecx=%#RX32\n", idMsr));
    119307735
    11931 #if defined(VBOX_STRICT) && defined(IN_RING0)
     7736#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
    119327737    Assert(!pVmxTransient->fIsNestedGuest);
    119337738    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    119347739    {
    11935         if (   vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
     7740        if (   hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
    119367741            && idMsr != MSR_K6_EFER)
    119377742        {
     
    119397744            HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
    119407745        }
    11941         if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
     7746        if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    119427747        {
    119437748            Assert(pVmcsInfo->pvMsrBitmap);
     
    120467851                default:
    120477852                {
    12048 #ifdef IN_RING0
    12049                     if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
     7853#ifndef IN_NEM_DARWIN
     7854                    if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    120507855                        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
    12051                     else if (vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
     7856                    else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
    120527857                        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    120537858#else
     
    120587863            }
    120597864        }
    12060 #if defined(VBOX_STRICT) && defined(IN_RING0)
     7865#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
    120617866        else
    120627867        {
     
    120777882                default:
    120787883                {
    12079                     if (vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
     7884                    if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
    120807885                    {
    120817886                        /* EFER MSR writes are always intercepted. */
     
    120887893                    }
    120897894
    12090                     if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
     7895                    if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    120917896                    {
    120927897                        Assert(pVmcsInfo->pvMsrBitmap);
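
The strict-build checks above distinguish MSRs the CPU loads/stores automatically from those handled lazily. Conceptually, the auto-load check is a linear scan of the VMCS auto-load/store area, an array of 128-bit slots holding an MSR index and its value (Intel SDM Vol. 3, VM-Entry MSR-Load Area). A hedged sketch with made-up names, not the helpers renamed in this hunk:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* One slot of a hypothetical auto-load/store MSR area. */
    typedef struct AUTOMSRSLOT
    {
        uint32_t idMsr;     /* MSR index, e.g. 0xC0000080 for IA32_EFER */
        uint32_t uReserved;
        uint64_t u64Value;  /* loaded on VM-entry / stored on VM-exit */
    } AUTOMSRSLOT;

    /* True if idMsr would be handled by the CPU automatically, so an
       RDMSR/WRMSR intercept on it is unexpected (EFER being the noted
       exception above). */
    static bool isAutoLoadMsr(AUTOMSRSLOT const *paSlots, size_t cSlots, uint32_t idMsr)
    {
        for (size_t i = 0; i < cSlots; i++)
            if (paSlots[i].idMsr == idMsr)
                return true;
        return false;
    }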
     
    121937998
    121947999            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    12195 #ifdef IN_RING0
     8000#ifndef IN_NEM_DARWIN
    121968001            uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
    121978002#endif
     
    122058010             *   - We are executing in the VM debug loop.
    122068011             */
    12207 #ifdef IN_RING0
     8012#ifndef IN_NEM_DARWIN
    122088013            Assert(   iCrReg != 3
    122098014                   || !VM_IS_VMX_NESTED_PAGING(pVM)
     
    122238028                      || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    122248029
    12225 #ifdef IN_RING0
     8030#ifndef IN_NEM_DARWIN
    122268031            /*
    122278032             * This is a kludge for handling switches back to real mode when we try to use
     
    122658070             *   - We are executing in the VM debug loop.
    122668071             */
    12267 #ifdef IN_RING0
     8072#ifndef IN_NEM_DARWIN
    122688073            Assert(   iCrReg != 3
    122698074                   || !VM_IS_VMX_NESTED_PAGING(pVM)
     
    124328237                rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
    124338238                STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
    12434 #ifdef IN_RING0
     8239#ifndef IN_NEM_DARWIN
    124358240                if (    rcStrict == VINF_IOM_R3_IOPORT_WRITE
    124368241                    && !pCtx->eflags.Bits.u1TF)
     
    124478252                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
    124488253                }
    12449 #ifdef IN_RING0
     8254#ifndef IN_NEM_DARWIN
    124508255                if (    rcStrict == VINF_IOM_R3_IOPORT_READ
    124518256                    && !pCtx->eflags.Bits.u1TF)
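
The (pCtx->eax & ~uAndVal) | (u32Result & uAndVal) merge a few lines up is how a narrow IN updates only the low part of EAX. A worked example for a 1-byte access, where uAndVal is 0xff and bits 31:8 must survive:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t eax       = UINT32_C(0x12345678); /* guest EAX before the IN */
        uint32_t u32Result = UINT32_C(0x000000AB); /* byte returned by the port read */
        uint32_t uAndVal   = UINT32_C(0x000000FF); /* mask for a 1-byte access */

        eax = (eax & ~uAndVal) | (u32Result & uAndVal);

        assert(eax == UINT32_C(0x123456AB));       /* AL replaced, upper bits kept */
        return 0;
    }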
     
    124918296                STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
    124928297
    12493 #ifdef IN_RING0
     8298#ifndef IN_NEM_DARWIN
    124948299                /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
    124958300                VMMRZCallRing3Disable(pVCpu);
     
    126798484    switch (uAccessType)
    126808485    {
    12681 #ifdef IN_RING0
     8486#ifndef IN_NEM_DARWIN
    126828487        case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
    126838488        case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
     
    127588563            AssertRC(rc);
    127598564
    12760 #ifdef IN_RING0
     8565#ifndef IN_NEM_DARWIN
    127618566            /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
    127628567            VMMRZCallRing3Disable(pVCpu);
     
    128338638    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    128348639
    12835 #ifdef IN_RING0
     8640#ifndef IN_NEM_DARWIN
    128368641    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
    128378642
     
    129468751{
    129478752    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    12948 #ifdef IN_RING0
     8753#ifndef IN_NEM_DARWIN
    129498754    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
    129508755
     
    134739278    switch (uExitIntType)
    134749279    {
     9280#ifndef IN_NEM_DARWIN
    134759281        /*
    134769282         * Physical NMIs:
     
    134789284         */
    134799285        case VMX_EXIT_INT_INFO_TYPE_NMI:
    13480             return vmxHCExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
     9286            return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
     9287#endif
    134819288
    134829289        /*
     
    1422310030    {
    1422410031        Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
    14225         vmxHCReadExitInstrLenVmcs(ppVCpu, VmxTransient);
     10032        vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    1422610033        vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    1422710034        vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);